diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000000..3f14953ec8f --- /dev/null +++ b/.gitmodules @@ -0,0 +1,16 @@ +[submodule "lib/ansible/modules/core"] + path = lib/ansible/modules/core + url = https://github.com/ansible/ansible-modules-core.git + branch = devel +[submodule "lib/ansible/modules/extras"] + path = lib/ansible/modules/extras + url = https://github.com/ansible/ansible-modules-extras.git + branch = devel +[submodule "v2/ansible/modules/core"] + path = v2/ansible/modules/core + url = https://github.com/ansible/ansible-modules-core.git + branch = devel +[submodule "v2/ansible/modules/extras"] + path = v2/ansible/modules/extras + url = https://github.com/ansible/ansible-modules-extras.git + branch = devel diff --git a/CHANGELOG.md b/CHANGELOG.md index 62850280c89..57d855544de 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,39 @@ Ansible Changes By Release ========================== -## 1.8 "You Really Got Me" - Active Development +## 1.9 "Dancing In the Street" - ACTIVE DEVELOPMENT + +in progress, details pending + +* Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally. +* Safety changes: several modules have force parameters that defaulted to true. + These have been changed to default to false so as not to accidentally lose + work. Playbooks that depended on the former behaviour will simply need to add + force=True to the task that needs it. Affected modules: + + * bzr: When local modifications exist in a checkout, the bzr module used to + default to removing the modifications on any operation. Now the module + will not remove the modifications unless force=yes is specified. + Operations that depend on a clean working tree may fail unless force=yes is + added. + * git: When local modifications exist in a checkout, the git module will now + fail unless force is explicitly specified. Specifying force will allow the + module to revert and overwrite local modifications to make git actions + succeed. + * hg: When local modifications exist in a checkout, the hg module used to + default to removing the modifications on any operation. Now the module + will not remove the modifications unless force=yes is specified. + * subversion: When updating a checkout with local modifications, you now need + to add force so the module will revert the modifications before updating. + +## 1.8.1 "You Really Got Me" - Nov 26, 2014 + +* Various bug fixes in postgresql and mysql modules. +* Fixed a bug related to lookup plugins used within roles not finding files based on the relative paths to the roles files/ directory. +* Fixed a bug related to vars specified in plays being templated too early, resulting in incorrect variable interpolation. +* Fixed a bug related to git submodules in bare repos. + +## 1.8 "You Really Got Me" - Nov 25, 2014 Major changes: @@ -16,6 +48,10 @@ Major changes: * command_warnings feature will warn about when usage of the shell/command module can be simplified to use core modules - this can be enabled in ansible.cfg * new omit value can be used to leave off a parameter when not set, like so module_name: a=1 b={{ c | default(omit) }}, would not pass value for b (not even an empty value) if c was not set. * developers: 'baby JSON' in module responses, originally intended for writing modules in bash, is removed as a feature to simplify logic, script module remains available for running bash scripts. +* async jobs started in "fire & forget" mode can now be checked on at a later time. 
+* added ability to subcategorize modules for docs.ansible.com +* added ability for shipped modules to have aliases with symlinks +* added ability to deprecate older modules by starting with "_" and including "deprecated: message why" in module docs New Modules: @@ -31,6 +67,9 @@ New Modules: Some other notable changes: +* added the ability to set "instance filters" in the ec2.ini to limit results from the inventory plugin. +* upgrades for various variable precedence items and parsing related items +* added a new "follow" parameter to the file and copy modules, which allows actions to be taken on the target of a symlink rather than the symlink itself. * if a module should ever traceback, it will return a standard error, catchable by ignore_errors, versus an 'unreachable' * ec2_lc: added support for multiple new parameters like kernel_id, ramdisk_id and ebs_optimized. * ec2_elb_lb: added support for the connection_draining_timeout and cross_az_load_balancing options. @@ -53,10 +92,42 @@ Some other notable changes: * various parser improvements * produce a friendly error message if the SSH key is too permissive * ec2_ami_search: support for SSD and IOPS provisioned EBS images +* can set ansible_sudo_exe as an inventory variable which allows specifying + a different sudo (or equivalent) command +* git module: Submodule handling has changed. Previously, if you used the + ``recursive`` parameter to handle submodules, ansible would track the + submodule upstream's head revision. This has been changed to check out the + version of the submodule specified in the superproject's git repository. + This is in line with what git submodule update does. If you want the old + behaviour, use the new module parameter track_submodules=yes +* Checksumming of transferred files has been made more portable and now uses + the sha1 algorithm instead of md5 to be compatible with FIPS-140. + - As a small side effect, the fetch module no longer returns a useful value + in remote_md5. If you need a replacement, switch to using remote_checksum + which returns the sha1sum of the remote file. +* ansible-doc CLI tool contains various improvements for working with different terminals And various other bug fixes and improvements ... +## 1.7.2 "Summer Nights" - Sep 24, 2014 + +- Fixes a bug in accelerate mode which caused a traceback when trying to use that connection method. +- Fixes a bug in vault where the password file option was not being used correctly internally. +- Improved multi-line parsing when using YAML literal blocks (using > or |). +- Fixed a bug with the file module and the creation of relative symlinks. +- Fixed a bug where checkmode was not being honored during the templating of files. +- Other various bug fixes. + +## 1.7.1 "Summer Nights" - Aug 14, 2014 + +- Security fix to disallow specifying 'args:' as a string, which could allow the insertion of extra module parameters through variables. +- Performance enhancements related to previous security fixes, which could cause slowness when modules returned very large JSON results. This specifically impacted the unarchive module frequently, which returns the details of all unarchived files in the result. 
+- Docker module bug fixes: + * Fixed support for specifying rw/ro bind modes for volumes + * Fixed support for allowing the tag in the image parameter +- Various other bug fixes + ## 1.7 "Summer Nights" - Aug 06, 2014 Major new features: diff --git a/MANIFEST.in b/MANIFEST.in index 4fb0c04a4ee..948d1761392 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,11 +1,11 @@ include README.md packaging/rpm/ansible.spec COPYING include examples/hosts include examples/ansible.cfg -graft examples/playbooks -include packaging/distutils/setup.py include lib/ansible/module_utils/powershell.ps1 +recursive-include lib/ansible/modules * recursive-include docs * -recursive-include library * include Makefile include VERSION include MANIFEST.in +prune lib/ansible/modules/core/.git +prune lib/ansible/modules/extras/.git diff --git a/Makefile b/Makefile index afd7162f96e..52fa1b53281 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ # make deb-src -------------- produce a DEB source # make deb ------------------ produce a DEB # make docs ----------------- rebuild the manpages (results are checked in) -# make tests ---------------- run the tests +# make tests ---------------- run the tests (see test/README.md for requirements) # make pyflakes, make pep8 -- source code checks ######################################################## @@ -86,12 +86,20 @@ MOCK_CFG ?= NOSETESTS ?= nosetests +NOSETESTS3 ?= nosetests-3.3 + ######################################################## all: clean python tests: - PYTHONPATH=./lib ANSIBLE_LIBRARY=./library $(NOSETESTS) -d -w test/units -v + PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v # Could do: --with-coverage --cover-package=ansible + +newtests: + PYTHONPATH=./v2:./lib $(NOSETESTS) -d -w v2/test -v --with-coverage --cover-package=ansible --cover-branches + +newtests-py3: + PYTHONPATH=./v2:./lib $(NOSETESTS3) -d -w v2/test -v --with-coverage --cover-package=ansible --cover-branches authors: sh hacking/authors.sh @@ -114,7 +122,7 @@ pep8: @echo "# Running PEP8 Compliance Tests" @echo "#############################################" -pep8 -r --ignore=E501,E221,W291,W391,E302,E251,E203,W293,E231,E303,E201,E225,E261,E241 lib/ bin/ - -pep8 -r --ignore=E501,E221,W291,W391,E302,E251,E203,W293,E231,E303,E201,E225,E261,E241 --filename "*" library/ + # -pep8 -r --ignore=E501,E221,W291,W391,E302,E251,E203,W293,E231,E303,E201,E225,E261,E241 --filename "*" library/ pyflakes: pyflakes lib/ansible/*.py lib/ansible/*/*.py bin/* diff --git a/README.md b/README.md index cfb6fc4891c..8bfe58a5433 100644 --- a/README.md +++ b/README.md @@ -4,22 +4,25 @@ Ansible ======= -Ansible is a radically simple configuration-management, application deployment, task-execution, and multinode orchestration engine. +Ansible is a radically simple IT automation system. It handles configuration-management, application deployment, cloud provisioning, ad-hoc task-execution, and multinode orchestration - including trivializing things like zero downtime rolling updates with load balancers. Read the documentation and more at http://ansible.com/ -Many users run straight from the development branch (it's generally fine to do so), but you might also wish to consume a release. You can find -instructions [here](http://docs.ansible.com/intro_getting_started.html) for a variety of platforms. If you want a tarball of the last release, go to [releases.ansible.com](http://releases.ansible.com/ansible) and you can also install with pip. 
+Many users run straight from the development branch (it's generally fine to do so), but you might also wish to consume a release. + +You can find instructions [here](http://docs.ansible.com/intro_getting_started.html) for a variety of platforms. If you decide to go with the development branch, be sure to run "git submodule update --init --recursive" after doing a checkout. + +If you want to download a tarball of a release, go to [releases.ansible.com](http://releases.ansible.com/ansible), though most users use yum (using the EPEL instructions linked above), apt (using the PPA instructions linked above), or "pip install ansible". Design Principles ================= * Have a dead simple setup process and a minimal learning curve - * Be super fast & parallel by default - * Require no server or client daemons; use existing SSHd - * Use a language that is both machine and human friendly + * Manage machines very quickly and in parallel + * Avoid custom-agents and additional open ports, be agentless by leveraging the existing SSH daemon + * Describe infrastructure in a language that is both machine and human friendly * Focus on security and easy auditability/review/rewriting of content - * Manage remote machines instantly, without bootstrapping + * Manage new remote machines instantly, without bootstrapping any software * Allow module development in any dynamic language, not just Python * Be usable as non-root * Be the easiest IT automation system to use, ever. @@ -27,8 +30,11 @@ Design Principles Get Involved ============ - * Read [Contributing.md](https://github.com/ansible/ansible/blob/devel/CONTRIBUTING.md) for all kinds of ways to contribute to and interact with the project, including mailing list information and how to submit bug reports and code to Ansible. + * Read [Community Information](http://docs.ansible.com/community.html) for all kinds of ways to contribute to and interact with the project, including mailing list information and how to submit bug reports and code to Ansible. * All code submissions are done through pull requests. Take care to make sure no merge commits are in the submission, and use "git rebase" vs "git merge" for this reason. If submitting a large code change (other than modules), it's probably a good idea to join ansible-devel and talk about what you would like to do or add first and to avoid duplicate efforts. This not only helps everyone know what's going on, it also helps save time and effort if we decide some changes are needed. + * Users list: [ansible-project](http://groups.google.com/group/ansible-project) + * Development list: [ansible-devel](http://groups.google.com/group/ansible-devel) + * Announcement list: [ansible-announce](http://groups.google.com/group/ansible-announce) - read only * irc.freenode.net: #ansible Branch Info @@ -36,14 +42,14 @@ Branch Info * Releases are named after Van Halen songs. * The devel branch corresponds to the release actively under development. + * As of 1.8, modules are kept in different repos, you'll want to follow [core](https://github.com/ansible/ansible-modules-core) and [extras](https://github.com/ansible/ansible-modules-extras) * Various release-X.Y branches exist for previous releases. - * We'd love to have your contributions, read "CONTRIBUTING.md" for process notes. + * We'd love to have your contributions, read [Community Information](http://docs.ansible.com/community.html) for notes on how to get started. 
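As a quick reference for the submodule instructions the README above introduces, a typical development-branch checkout now looks like this (a sketch only; the URL is the upstream repo, so substitute your own fork if you use one):

    git clone https://github.com/ansible/ansible.git --recursive
    cd ansible
    source hacking/env-setup
    # for an existing checkout made before the submodule split, fetch the module repos instead:
    git submodule update --init --recursive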
-Author -====== +Authors +======= -Ansible was created by Michael DeHaan (michael@ansible.com) and has contributions from over -800 users (and growing). Thanks everyone! +Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael.dehaan/gmail/com) and has contributions from over 900 users (and growing). Thanks everyone! -[Ansible, Inc](http://ansible.com) +Ansible is sponsored by [Ansible, Inc](http://ansible.com) diff --git a/RELEASES.txt b/RELEASES.txt index 01d880dfa55..ddcce78efab 100644 --- a/RELEASES.txt +++ b/RELEASES.txt @@ -4,11 +4,14 @@ Ansible Releases at a Glance Active Development ++++++++++++++++++ -1.8 "You Really Got Me" ---- FALL 2014 +1.9 "Dancing In the Street" - in progress Released ++++++++ +1.8.1 "You Really Got Me" -- 11-26-2014 +1.7.2 "Summer Nights" -------- 09-24-2014 +1.7.1 "Summer Nights" -------- 08-14-2014 1.7 "Summer Nights" -------- 08-06-2014 1.6.10 "The Cradle Will Rock" - 07-25-2014 1.6.9 "The Cradle Will Rock" - 07-24-2014 diff --git a/VERSION b/VERSION index 6259340971b..2e0e38c63a6 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.8 +1.9 diff --git a/bin/ansible b/bin/ansible index 792b259144a..5aaaa582a7e 100755 --- a/bin/ansible +++ b/bin/ansible @@ -19,6 +19,17 @@ ######################################################## +__requires__ = ['ansible'] +try: + import pkg_resources +except Exception: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. But we + # have code that better expresses the errors in the places where the code + # is actually used (the deps are optional for many code paths) so we don't + # want to fail here. + pass + import os import sys @@ -90,26 +101,6 @@ class Cli(object): pattern = args[0] - """ - inventory_manager = inventory.Inventory(options.inventory) - if options.subset: - inventory_manager.subset(options.subset) - hosts = inventory_manager.list_hosts(pattern) - if len(hosts) == 0: - callbacks.display("No hosts matched", stderr=True) - sys.exit(0) - - if options.listhosts: - for host in hosts: - callbacks.display(' %s' % host) - sys.exit(0) - - if ((options.module_name == 'command' or options.module_name == 'shell') - and not options.module_args): - callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True) - sys.exit(1) - """ - sshpass = None sudopass = None su_pass = None @@ -129,6 +120,8 @@ class Cli(object): if not options.ask_vault_pass and options.vault_password_file: vault_pass = utils.read_vault_file(options.vault_password_file) + extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass) + inventory_manager = inventory.Inventory(options.inventory, vault_password=vault_pass) if options.subset: inventory_manager.subset(options.subset) @@ -177,7 +170,8 @@ class Cli(object): su=options.su, su_pass=su_pass, su_user=options.su_user, - vault_pass=vault_pass + vault_pass=vault_pass, + extra_vars=extra_vars, ) if options.seconds: diff --git a/bin/ansible-doc b/bin/ansible-doc index d949c8e5376..8bab0e76cd4 100755 --- a/bin/ansible-doc +++ b/bin/ansible-doc @@ -25,6 +25,10 @@ import re import optparse import datetime import subprocess +import fcntl +import termios +import struct + from ansible import utils from ansible.utils import module_docs import ansible.constants as C @@ -33,7 +37,8 @@ import traceback MODULEDIR = C.DEFAULT_MODULE_PATH -BLACKLIST_EXTS = ('.swp', '.bak', '~', '.rpm') +BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm') +IGNORE_FILES = [ "COPYING", 
"CONTRIBUTING", "LICENSE", "README", "VERSION"] _ITALIC = re.compile(r"I\(([^)]+)\)") _BOLD = re.compile(r"B\(([^)]+)\)") @@ -70,7 +75,7 @@ def pager(text): pager_print(text) else: pager_pipe(text, os.environ['PAGER']) - elif hasattr(os, 'system') and os.system('(less) 2> /dev/null') == 0: + elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0: pager_pipe(text, 'less') else: pager_print(text) @@ -94,7 +99,7 @@ def get_man_text(doc): desc = " ".join(doc['description']) text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=" ", subsequent_indent=" ")) - + if 'option_keys' in doc and len(doc['option_keys']) > 0: text.append("Options (= is mandatory):\n") @@ -164,7 +169,15 @@ def get_snippet_text(doc): return "\n".join(text) def get_module_list_text(module_list): + tty_size = 0 + if os.isatty(0): + tty_size = struct.unpack('HHHH', + fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1] + columns = max(60, tty_size) + displace = max(len(x) for x in module_list) + linelimit = columns - displace - 5 text = [] + deprecated = [] for module in sorted(set(module_list)): if module in module_docs.BLACKLIST_MODULES: @@ -181,15 +194,45 @@ def get_module_list_text(module_list): try: doc, plainexamples = module_docs.get_docstring(filename) - desc = tty_ify(doc.get('short_description', '?')) - if len(desc) > 55: - desc = desc + '...' - text.append("%-20s %-60.60s" % (module, desc)) + desc = tty_ify(doc.get('short_description', '?')).strip() + if len(desc) > linelimit: + desc = desc[:linelimit] + '...' + + if module.startswith('_'): # Handle deprecated + deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc)) + else: + text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) except: traceback.print_exc() sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module) + + if len(deprecated) > 0: + text.append("\nDEPRECATED:") + text.extend(deprecated) return "\n".join(text) +def find_modules(path, module_list): + + if os.path.isdir(path): + for module in os.listdir(path): + if module.startswith('.'): + continue + elif os.path.isdir(module): + find_modules(module, module_list) + elif any(module.endswith(x) for x in BLACKLIST_EXTS): + continue + elif module.startswith('__'): + continue + elif module in IGNORE_FILES: + continue + elif module.startswith('_'): + fullpath = '/'.join([path,module]) + if os.path.islink(fullpath): # avoids aliases + continue + + module = os.path.splitext(module)[0] # removes the extension + module_list.append(module) + def main(): p = optparse.OptionParser( @@ -222,23 +265,18 @@ def main(): utils.plugins.module_finder.add_directory(i) if options.list_dir: - # list all modules + # list modules paths = utils.plugins.module_finder._get_paths() module_list = [] for path in paths: - # os.system("ls -C %s" % (path)) - if os.path.isdir(path): - for module in os.listdir(path): - if any(module.endswith(x) for x in BLACKLIST_EXTS): - continue - module_list.append(module) + find_modules(path, module_list) pager(get_module_list_text(module_list)) sys.exit() if len(args) == 0: p.print_help() - + def print_paths(finder): ''' Returns a string suitable for printing of the search path ''' @@ -248,14 +286,13 @@ def main(): if i not in ret: ret.append(i) return os.pathsep.join(ret) - + text = '' for module in args: filename = utils.plugins.module_finder.find_plugin(module) if filename is None: - sys.stderr.write("module %s not found in %s\n" % (module, - 
print_paths(utils.plugins.module_finder))) + sys.stderr.write("module %s not found in %s\n" % (module, print_paths(utils.plugins.module_finder))) continue if any(filename.endswith(x) for x in BLACKLIST_EXTS): diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy index 146361da93f..45dc8564f81 100755 --- a/bin/ansible-galaxy +++ b/bin/ansible-galaxy @@ -48,6 +48,9 @@ galaxy_info: author: {{ author }} description: {{description}} company: {{ company }} + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: {{ issue_tracker_url }} # Some suggested licenses: # - BSD (default) # - MIT @@ -135,6 +138,7 @@ An optional section for the role authors to include contact information, or a we #------------------------------------------------------------------------------------- VALID_ACTIONS = ("init", "info", "install", "list", "remove") +SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) def get_action(args): """ @@ -237,6 +241,7 @@ def exit_without_ignore(options, rc=1): print '- you can use --ignore-errors to skip failed roles.' sys.exit(rc) + #------------------------------------------------------------------------------------- # Galaxy API functions #------------------------------------------------------------------------------------- @@ -257,7 +262,7 @@ def api_get_config(api_server): except: return None -def api_lookup_role_by_name(api_server, role_name): +def api_lookup_role_by_name(api_server, role_name, notify=True): """ Uses the Galaxy API to do a lookup on the role owner/name. """ @@ -268,7 +273,8 @@ def api_lookup_role_by_name(api_server, role_name): parts = role_name.split(".") user_name = ".".join(parts[0:-1]) role_name = parts[-1] - print "- downloading role '%s', owned by %s" % (role_name, user_name) + if notify: + print "- downloading role '%s', owned by %s" % (role_name, user_name) except: parser.print_help() print "- invalid role name (%s). Specify role as format: username.rolename" % role_name @@ -377,7 +383,7 @@ def scm_archive_role(scm, role_url, role_version, role_name): print " in directory %s" % tempdir return False - shutil.rmtree(tempdir) + shutil.rmtree(tempdir, ignore_errors=True) return temp_file.name @@ -640,7 +646,7 @@ def execute_init(args, options, parser): categories = [] if not offline: categories = api_get_list(api_server, "categories") or [] - + # group the list of platforms from the api based # on their names, with the release field being # appended to a list of versions @@ -653,6 +659,7 @@ def execute_init(args, options, parser): author = 'your name', company = 'your company (optional)', license = 'license (GPLv2, CC-BY, etc)', + issue_tracker_url = 'http://example.com/issue/tracker', min_ansible_version = '1.2', platforms = platform_groups, categories = categories, @@ -676,7 +683,56 @@ def execute_info(args, options, parser): from the galaxy API. 
""" - pass + if len(args) == 0: + # the user needs to specify a role + parser.print_help() + print "- you must specify a user/role name" + sys.exit(1) + + api_server = get_opt(options, "api_server", "galaxy.ansible.com") + api_config = api_get_config(api_server) + roles_path = get_opt(options, "roles_path") + + for role in args: + + role_info = {} + + install_info = get_galaxy_install_info(role, options) + if install_info: + if 'version' in install_info: + install_info['intalled_version'] = install_info['version'] + del install_info['version'] + role_info.update(install_info) + + remote_data = api_lookup_role_by_name(api_server, role, False) + if remote_data: + role_info.update(remote_data) + + metadata = get_role_metadata(role, options) + if metadata: + role_info.update(metadata) + + role_spec = ansible.utils.role_spec_parse(role) + if role_spec: + role_info.update(role_spec) + + if role_info: + print "- %s:" % (role) + for k in sorted(role_info.keys()): + + if k in SKIP_INFO_KEYS: + continue + + if isinstance(role_info[k], dict): + print "\t%s: " % (k) + for key in sorted(role_info[k].keys()): + if key in SKIP_INFO_KEYS: + continue + print "\t\t%s: %s" % (key, role_info[k][key]) + else: + print "\t%s: %s" % (k, role_info[k]) + else: + print "- the role %s was not found" % role def execute_install(args, options, parser): """ @@ -687,30 +743,23 @@ def execute_install(args, options, parser): """ role_file = get_opt(options, "role_file", None) - api_server = get_opt(options, "api_server", "galaxy.ansible.com") - no_deps = get_opt(options, "no_deps", False) - roles_path = get_opt(options, "roles_path") - if len(args) == 0 and not role_file: + if len(args) == 0 and role_file is None: # the user needs to specify one of either --role-file # or specify a single user/role name parser.print_help() print "- you must specify a user/role name or a roles file" sys.exit() - elif len(args) == 1 and role_file: + elif len(args) == 1 and not role_file is None: # using a role file is mutually exclusive of specifying # the role name on the command line parser.print_help() print "- please specify a user/role name, or a roles file, but not both" sys.exit(1) - # error checking to ensure the specified roles path exists and is a directory - if not os.path.exists(roles_path): - print "- the specified role path %s does not exist" % roles_path - sys.exit(1) - elif not os.path.isdir(roles_path): - print "- the specified role path %s is not a directory" % roles_path - sys.exit(1) + api_server = get_opt(options, "api_server", "galaxy.ansible.com") + no_deps = get_opt(options, "no_deps", False) + roles_path = get_opt(options, "roles_path") roles_done = [] if role_file: @@ -759,10 +808,11 @@ def execute_install(args, options, parser): role_data = api_lookup_role_by_name(api_server, role_src) if not role_data: print "- sorry, %s was not found on %s." % (role_src, api_server) + exit_without_ignore(options) continue role_versions = api_fetch_role_related(api_server, 'versions', role_data['id']) - if "version" not in role: + if "version" not in role or role['version'] == '': # convert the version names to LooseVersion objects # and sort them to get the latest version. 
If there # are no versions in the list, we'll grab the head @@ -787,7 +837,8 @@ def execute_install(args, options, parser): if tmp_file: installed = install_role(role.get("name"), role.get("version"), tmp_file, options) # we're done with the temp file, clean it up - os.unlink(tmp_file) + if tmp_file != role_src: + os.unlink(tmp_file) # install dependencies, if we want them if not no_deps and installed: if not role_data: @@ -809,8 +860,6 @@ def execute_install(args, options, parser): else: print '- dependency %s is already installed, skipping.' % dep["name"] if not tmp_file or not installed: - if tmp_file and installed: - os.unlink(tmp_file) print "- %s was NOT installed successfully." % role.get("name") exit_without_ignore(options) sys.exit(0) diff --git a/bin/ansible-playbook b/bin/ansible-playbook index 96e87de3eb0..a3b29581795 100755 --- a/bin/ansible-playbook +++ b/bin/ansible-playbook @@ -18,8 +18,16 @@ ####################################################### -#__requires__ = ['ansible'] -#import pkg_resources +__requires__ = ['ansible'] +try: + import pkg_resources +except Exception: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. But we + # have code that better expresses the errors in the places where the code + # is actually used (the deps are optional for many code paths) so we don't + # want to fail here. + pass import sys import os @@ -75,8 +83,6 @@ def main(args): ) #parser.add_option('--vault-password', dest="vault_password", # help="password for vault encrypted files") - parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", - help="set additional variables as key=value or YAML/JSON", default=[]) parser.add_option('-t', '--tags', dest='tags', default='all', help="only run plays and tasks tagged with these values") parser.add_option('--skip-tags', dest='skip_tags', @@ -134,17 +140,7 @@ def main(args): if not options.ask_vault_pass and options.vault_password_file: vault_pass = utils.read_vault_file(options.vault_password_file) - extra_vars = {} - for extra_vars_opt in options.extra_vars: - if extra_vars_opt.startswith("@"): - # Argument is a YAML file (JSON is a subset of YAML) - extra_vars = utils.combine_vars(extra_vars, utils.parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass)) - elif extra_vars_opt and extra_vars_opt[0] in '[{': - # Arguments as YAML - extra_vars = utils.combine_vars(extra_vars, utils.parse_yaml(extra_vars_opt)) - else: - # Arguments as Key-value - extra_vars = utils.combine_vars(extra_vars, utils.parse_kv(extra_vars_opt)) + extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass) only_tags = options.tags.split(",") skip_tags = options.skip_tags @@ -158,9 +154,23 @@ def main(args): raise errors.AnsibleError("the playbook: %s does not appear to be a file" % playbook) inventory = ansible.inventory.Inventory(options.inventory, vault_password=vault_pass) - inventory.subset(options.subset) + + # Note: slightly wrong, this is written so that implicit localhost + # (which is not returned in list_hosts()) is taken into account for + # warning if inventory is empty. But it can't be taken into account for + # checking if limit doesn't match any hosts. Instead we don't worry about + # limit if only implicit localhost was in inventory to start with. 
+ # + # Fix this in v2 + no_hosts = False if len(inventory.list_hosts()) == 0: - raise errors.AnsibleError("provided hosts list is empty") + # Empty inventory + utils.warning("provided hosts list is empty, only localhost is available") + no_hosts = True + inventory.subset(options.subset) + if len(inventory.list_hosts()) == 0 and no_hosts is False: + # Invalid limit + raise errors.AnsibleError("Specified --limit does not match any hosts") # run all playbooks specified on the command line for playbook in args: @@ -276,7 +286,7 @@ def main(args): retries = failed_hosts + unreachable_hosts - if len(retries) > 0: + if C.RETRY_FILES_ENABLED and len(retries) > 0: filename = pb.generate_retry_inventory(retries) if filename: display(" to retry, use: --limit @%s\n" % filename) diff --git a/bin/ansible-pull b/bin/ansible-pull index 4f4da24d831..a9a0897fbff 100755 --- a/bin/ansible-pull +++ b/bin/ansible-pull @@ -40,7 +40,6 @@ import os import shutil -import subprocess import sys import datetime import socket @@ -135,6 +134,12 @@ def main(args): help="vault password file") parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', help='ask for sudo password') + parser.add_option('-t', '--tags', dest='tags', default=False, + help='only run plays and tasks tagged with these values') + parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', + help='adds the hostkey for the repo url if not already added') + parser.add_option('--key-file', dest='key_file', + help="Pass '-i ' to the SSH arguments used by git.") options, args = parser.parse_args(args) hostname = socket.getfqdn() @@ -149,7 +154,7 @@ def main(args): return 1 now = datetime.datetime.now() - print >>sys.stderr, now.strftime("Starting ansible-pull at %F %T") + print now.strftime("Starting ansible-pull at %F %T") # Attempt to use the inventory passed in as an argument # It might not yet have been downloaded so use localhost if note @@ -168,6 +173,15 @@ def main(args): if options.checkout: repo_opts += ' version=%s' % options.checkout + + # Only git module is supported + if options.module_name == DEFAULT_REPO_TYPE: + if options.accept_host_key: + repo_opts += ' accept_hostkey=yes' + + if options.key_file: + repo_opts += ' key_file=%s' % options.key_file + path = utils.plugins.module_finder.find_plugin(options.module_name) if path is None: sys.stderr.write("module '%s' not found.\n" % options.module_name) @@ -175,6 +189,8 @@ def main(args): cmd = 'ansible localhost -i "%s" %s -m %s -a "%s"' % ( inv_opts, base_opts, options.module_name, repo_opts ) + for ev in options.extra_vars: + cmd += ' -e "%s"' % ev if options.sleep: try: @@ -192,7 +208,7 @@ def main(args): if rc != 0: if options.force: - print "Unable to update repository. Continuing with (forced) run of playbook." + print >>sys.stderr, "Unable to update repository. Continuing with (forced) run of playbook." else: return rc elif options.ifchanged and '"changed": true' not in out: @@ -214,6 +230,8 @@ def main(args): cmd += ' -e "%s"' % ev if options.ask_sudo_pass: cmd += ' -K' + if options.tags: + cmd += ' -t "%s"' % options.tags os.chdir(options.dest) # RUN THE PLAYBOOK COMMAND diff --git a/bin/ansible-vault b/bin/ansible-vault index 09f08d54874..22cfc0e1487 100755 --- a/bin/ansible-vault +++ b/bin/ansible-vault @@ -15,13 +15,19 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
# -# ansible-pull is a script that runs ansible in local mode -# after checking out a playbooks directory from source repo. There is an -# example playbook to bootstrap this script in the examples/ dir which -# installs ansible and sets it up to run on cron. +# ansible-vault is a script that encrypts/decrypts YAML files. See +# http://docs.ansible.com/playbooks_vault.html for more details. -#__requires__ = ['ansible'] -#import pkg_resources +__requires__ = ['ansible'] +try: + import pkg_resources +except Exception: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. But we + # have code that better expresses the errors in the places where the code + # is actually used (the deps are optional for many code paths) so we don't + # want to fail here. + pass import os import sys diff --git a/docs/man/man1/ansible-doc.1 b/docs/man/man1/ansible-doc.1 index 041cf48099e..2d5068d0d3a 100644 --- a/docs/man/man1/ansible-doc.1 +++ b/docs/man/man1/ansible-doc.1 @@ -1,13 +1,13 @@ '\" t .\" Title: ansible-doc .\" Author: :doctype:manpage -.\" Generator: DocBook XSL Stylesheets v1.76.1 -.\" Date: 05/26/2014 +.\" Generator: DocBook XSL Stylesheets v1.78.1 +.\" Date: 12/09/2014 .\" Manual: System administration commands -.\" Source: Ansible 1.7 +.\" Source: Ansible 1.9 .\" Language: English .\" -.TH "ANSIBLE\-DOC" "1" "05/26/2014" "Ansible 1\&.7" "System administration commands" +.TH "ANSIBLE\-DOC" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -64,9 +64,3 @@ Ansible is released under the terms of the GPLv3 License\&. \fBansible\-playbook\fR(1), \fBansible\fR(1), \fBansible\-pull\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible -.SH "AUTHOR" -.PP -\fB:doctype:manpage\fR -.RS 4 -Author. -.RE diff --git a/docs/man/man1/ansible-galaxy.1 b/docs/man/man1/ansible-galaxy.1 index 5bac353505f..eac74b6a85d 100644 --- a/docs/man/man1/ansible-galaxy.1 +++ b/docs/man/man1/ansible-galaxy.1 @@ -1,13 +1,13 @@ '\" t .\" Title: ansible-galaxy .\" Author: [see the "AUTHOR" section] -.\" Generator: DocBook XSL Stylesheets v1.76.1 -.\" Date: 05/26/2014 +.\" Generator: DocBook XSL Stylesheets v1.78.1 +.\" Date: 12/09/2014 .\" Manual: System administration commands -.\" Source: Ansible 1.7 +.\" Source: Ansible 1.9 .\" Language: English .\" -.TH "ANSIBLE\-GALAXY" "1" "05/26/2014" "Ansible 1\&.7" "System administration commands" +.TH "ANSIBLE\-GALAXY" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -149,6 +149,11 @@ Force overwriting an existing role\&. .RS 4 The path in which the skeleton role will be created\&.The default is the current working directory\&. .RE +.PP +\fB\-\-offline\fR +.RS 4 +Don\(cqt query the galaxy API when creating roles +.RE .SH "LIST" .sp The \fBlist\fR sub\-command is used to show what roles are currently instaled\&. You can specify a role name, and if installed only that role will be shown\&. 
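As a usage sketch of the ansible-galaxy additions documented above (the role names "myrole" and "username.rolename" are placeholders, not real Galaxy roles):

    # create a role skeleton without querying the galaxy API
    ansible-galaxy init myrole --offline
    # print install status and metadata for a role
    ansible-galaxy info username.rolename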
diff --git a/docs/man/man1/ansible-galaxy.1.asciidoc.in b/docs/man/man1/ansible-galaxy.1.asciidoc.in index b8a80e6b2c5..3d59e317063 100644 --- a/docs/man/man1/ansible-galaxy.1.asciidoc.in +++ b/docs/man/man1/ansible-galaxy.1.asciidoc.in @@ -122,6 +122,10 @@ Force overwriting an existing role. The path in which the skeleton role will be created.The default is the current working directory. +*--offline*:: + +Don't query the galaxy API when creating roles + LIST ---- diff --git a/docs/man/man1/ansible-playbook.1 b/docs/man/man1/ansible-playbook.1 index 63f8904f0c0..7ae5308713e 100644 --- a/docs/man/man1/ansible-playbook.1 +++ b/docs/man/man1/ansible-playbook.1 @@ -1,13 +1,13 @@ '\" t .\" Title: ansible-playbook .\" Author: :doctype:manpage -.\" Generator: DocBook XSL Stylesheets v1.76.1 -.\" Date: 05/26/2014 +.\" Generator: DocBook XSL Stylesheets v1.78.1 +.\" Date: 12/09/2014 .\" Manual: System administration commands -.\" Source: Ansible 1.7 +.\" Source: Ansible 1.9 .\" Language: English .\" -.TH "ANSIBLE\-PLAYBOOK" "1" "05/26/2014" "Ansible 1\&.7" "System administration commands" +.TH "ANSIBLE\-PLAYBOOK" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -176,9 +176,3 @@ Ansible is released under the terms of the GPLv3 License\&. \fBansible\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible -.SH "AUTHOR" -.PP -\fB:doctype:manpage\fR -.RS 4 -Author. -.RE diff --git a/docs/man/man1/ansible-pull.1 b/docs/man/man1/ansible-pull.1 index 58029eabb84..a9b69788b47 100644 --- a/docs/man/man1/ansible-pull.1 +++ b/docs/man/man1/ansible-pull.1 @@ -1,13 +1,13 @@ '\" t .\" Title: ansible .\" Author: :doctype:manpage -.\" Generator: DocBook XSL Stylesheets v1.76.1 -.\" Date: 05/26/2014 +.\" Generator: DocBook XSL Stylesheets v1.78.1 +.\" Date: 12/09/2014 .\" Manual: System administration commands -.\" Source: Ansible 1.7 +.\" Source: Ansible 1.9 .\" Language: English .\" -.TH "ANSIBLE" "1" "05/26/2014" "Ansible 1\&.7" "System administration commands" +.TH "ANSIBLE" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -31,7 +31,7 @@ ansible-pull \- set up a remote copy of ansible on each managed node .SH "SYNOPSIS" .sp -ansible \-d DEST \-U URL [options] [ ] +ansible\-pull \-d DEST \-U URL [options] [ ] .SH "DESCRIPTION" .sp \fBAnsible\fR is an extra\-simple tool/framework/API for doing \*(Aqremote things\*(Aq over SSH\&. @@ -104,9 +104,3 @@ Ansible is released under the terms of the GPLv3 License\&. \fBansible\fR(1), \fBansible\-playbook\fR(1), \fBansible\-doc\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible -.SH "AUTHOR" -.PP -\fB:doctype:manpage\fR -.RS 4 -Author. 
-.RE diff --git a/docs/man/man1/ansible-pull.1.asciidoc.in b/docs/man/man1/ansible-pull.1.asciidoc.in index 39a8de0b0e1..d75fc637946 100644 --- a/docs/man/man1/ansible-pull.1.asciidoc.in +++ b/docs/man/man1/ansible-pull.1.asciidoc.in @@ -12,7 +12,7 @@ ansible-pull - set up a remote copy of ansible on each managed node SYNOPSIS -------- -ansible -d DEST -U URL [options] [ ] +ansible-pull -d DEST -U URL [options] [ ] DESCRIPTION diff --git a/docs/man/man1/ansible-vault.1 b/docs/man/man1/ansible-vault.1 index f353e3269fb..286e642748d 100644 --- a/docs/man/man1/ansible-vault.1 +++ b/docs/man/man1/ansible-vault.1 @@ -1,13 +1,13 @@ '\" t .\" Title: ansible-vault .\" Author: [see the "AUTHOR" section] -.\" Generator: DocBook XSL Stylesheets v1.76.1 -.\" Date: 05/26/2014 +.\" Generator: DocBook XSL Stylesheets v1.78.1 +.\" Date: 12/09/2014 .\" Manual: System administration commands -.\" Source: Ansible 1.7 +.\" Source: Ansible 1.9 .\" Language: English .\" -.TH "ANSIBLE\-VAULT" "1" "05/26/2014" "Ansible 1\&.7" "System administration commands" +.TH "ANSIBLE\-VAULT" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- diff --git a/docs/man/man1/ansible.1 b/docs/man/man1/ansible.1 index 233428782ed..b6fe5be756a 100644 --- a/docs/man/man1/ansible.1 +++ b/docs/man/man1/ansible.1 @@ -1,13 +1,13 @@ '\" t .\" Title: ansible .\" Author: :doctype:manpage -.\" Generator: DocBook XSL Stylesheets v1.76.1 -.\" Date: 05/26/2014 +.\" Generator: DocBook XSL Stylesheets v1.78.1 +.\" Date: 12/09/2014 .\" Manual: System administration commands -.\" Source: Ansible 1.7 +.\" Source: Ansible 1.9 .\" Language: English .\" -.TH "ANSIBLE" "1" "05/26/2014" "Ansible 1\&.7" "System administration commands" +.TH "ANSIBLE" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -206,9 +206,3 @@ Ansible is released under the terms of the GPLv3 License\&. \fBansible\-playbook\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible -.SH "AUTHOR" -.PP -\fB:doctype:manpage\fR -.RS 4 -Author. -.RE diff --git a/docsite/Makefile b/docsite/Makefile index f5d1b10c12a..92129f78514 100644 --- a/docsite/Makefile +++ b/docsite/Makefile @@ -40,7 +40,7 @@ clean: .PHONEY: docs clean modules: $(FORMATTER) ../hacking/templates/rst.j2 - PYTHONPATH=../lib $(FORMATTER) -t rst --template-dir=../hacking/templates --module-dir=../library -o rst/ + PYTHONPATH=../lib $(FORMATTER) -t rst --template-dir=../hacking/templates --module-dir=../lib/ansible/modules -o rst/ staticmin: cat _themes/srtd/static/css/theme.css | sed -e 's/^[ \t]*//g; s/[ \t]*$$//g; s/\([:{;,]\) /\1/g; s/ {/{/g; s/\/\*.*\*\///g; /^$$/d' | sed -e :a -e '$$!N; s/\n\(.\)/\1/; ta' > _themes/srtd/static/css/theme.min.css diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index 866efb72fc2..d073c4c22f8 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -177,15 +177,17 @@
[hunk body lost in extraction: the HTML markup edited here was stripped, leaving only bare +/- line markers; per the hunk header, the change rewrites the layout markup immediately above the breadcrumbs include that follows]
{% include "breadcrumbs.html" %}
diff --git a/docsite/build-site.py b/docsite/build-site.py index 70755b8a282..587a189f077 100755 --- a/docsite/build-site.py +++ b/docsite/build-site.py @@ -88,14 +88,7 @@ if __name__ == '__main__': print " Run 'make viewdocs' to build and then preview in a web browser." sys.exit(0) - # The 'htmldocs' make target will call this scrip twith the 'rst' - # parameter' We don't need to run the 'htmlman' target then. - if "rst" in sys.argv: - build_rst_docs() - else: - # By default, preform the rst->html transformation and then - # the asciidoc->html trasnformation - build_rst_docs() + build_rst_docs() if "view" in sys.argv: import webbrowser diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst index 3230a39f244..424db0ad466 100644 --- a/docsite/rst/YAMLSyntax.rst +++ b/docsite/rst/YAMLSyntax.rst @@ -25,7 +25,7 @@ Ansible or not) should begin with ``---``. This is part of the YAML format and indicates the start of a document. All members of a list are lines beginning at the same indentation level starting -with a ``-`` (dash) character:: +with a ``"- "`` (a dash and a space):: --- # A list of tasty fruits - Apple - Orange - Strawberry - Mango -A dictionary is represented in a simple ``key:`` and ``value`` form:: +A dictionary is represented in a simple ``key: value`` form (the colon must be followed by a space):: --- # An employee record diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst index d75ec8d0bbe..4d2de28ce16 100644 --- a/docsite/rst/community.rst +++ b/docsite/rst/community.rst @@ -57,16 +57,19 @@ feature development, so clearing bugs out of the way is one of the best things y If you're not a developer, helping test pull requests for bug fixes and features is still immensely valuable. You can do this by checking out ansible, making a test branch off the main one, merging a GitHub issue, testing, and then commenting on that particular issue on GitHub. -I'd Like To Report A Bugs +I'd Like To Report A Bug ------------------------------------ Ansible practices responsible disclosure - if this is a security related bug, email `security@ansible.com `_ instead of filing a ticket or posting to the Google Group and you will receive a prompt response. -Bugs should be reported to `github.com/ansible/ansible `_ after +Bugs related to the core language should be reported to `github.com/ansible/ansible `_ after signing up for a free github account. Before reporting a bug, please use the bug/issue search to see if the issue has already been reported. -When filing a bug, please use the `issue template `_ to provide all relevant information. +Module-related bugs, however, should go to `ansible-modules-core `_ or `ansible-modules-extras `_ based on the classification of the module. This is listed on the bottom of the docs page for any module. + +When filing a bug, please use the `issue template `_ to provide all relevant information, regardless of what repo you are filing a ticket against. + Knowing your ansible version and the exact commands you are running, and what you expect, saves time and helps us help everyone with their issues more quickly. @@ -102,8 +105,7 @@ documenting a new feature, submit a github pull request to the code that lives in the “docsite/rst” subdirectory of the project for most pages, and there is an "Edit on GitHub" link up on those. -Module documentation is generated from a DOCUMENTATION structure embedded in the source code of each module -in the library/ directory. 
+Module documentation is generated from a DOCUMENTATION structure embedded in the source code of each module, which is in either the ansible-modules-core or ansible-modules-extras repos on github, depending on the module. Information about this is always listed on the bottom of the web documentation for each module. Aside from modules, the main docs are in restructured text format. @@ -113,7 +115,7 @@ github about any errors you spot or sections you would like to see added. For more information on creating pull requests, please refer to the `github help guide `_. -For Current and Propspective Developers +For Current and Prospective Developers ======================================= I'd Like To Learn How To Develop on Ansible ------------------------------------------- Modules are some of the easiest places to get started. Contributing Code (Features or Bugfixes) ---------------------------------------- -The Ansible project keeps it’s source on github at -`github.com/ansible/ansible `_ - -and takes contributions through +The Ansible project keeps its source on github at +`github.com/ansible/ansible `_ for the core application, and two sub repos ansible/ansible-modules-core and ansible/ansible-modules-extras for module related items. If you need to know if a module is in 'core' or 'extras', consult the web documentation page for that module. + +The project takes contributions through `github pull requests `_. It is usually a good idea to join the ansible-devel list to discuss any large features prior to submission, and this especially helps in avoiding duplicate work or efforts where we decide, upon seeing a pull request for the first time, that revisions are needed. (This is not usually needed for module development, but can be nice for large changes). @@ -144,7 +146,7 @@ to modify a pull request later. When submitting patches, be sure to run the unit tests first “make tests” and always use “git rebase” vs “git merge” (aliasing git pull to git pull --rebase is a great idea) to -avoid merge commits in your submissions. There are also integration tests that can be run in the "tests/integration" directory. +avoid merge commits in your submissions. There are also integration tests that can be run in the "test/integration" directory. In order to keep the history clean and better audit incoming code, we will require resubmission of pull requests that contain merge commits. Use "git pull --rebase" vs "git pull" and "git rebase" vs "git merge". Also be sure to use topic branches to keep your additions on different branches, such that they won't pick up stray commits later. diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 0b1695c90f9..82edea9de89 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -11,9 +11,17 @@ See :doc:`modules` for a list of various ones developed in core. Modules can be written in any language and are found in the path specified by `ANSIBLE_LIBRARY` or the ``--module-path`` command line option. +By default, everything that ships with ansible is pulled from its source tree, but +additional paths can be added. + +The directory "./library", alongside your top level playbooks, is also automatically +added as a search directory. + Should you develop an interesting Ansible module, consider sending a pull request to the -`github project `_ to see about getting your module -included in the core project. +`modules-extras project `_. There's also a core +repo for more established and widely used modules. 
"Extras" modules may be promoted to core periodically, +but there's no fundamental difference in the end - both ship with ansible, all in one package, regardless +of how you acquire ansible. .. _module_dev_tutorial: @@ -40,7 +48,7 @@ modules. Keep in mind, though, that some modules in ansible's source tree are so look at `service` or `yum`, and don't stare too close into things like `async_wrapper` or you'll turn to stone. Nobody ever executes async_wrapper directly. -Ok, let's get going with an example. We'll use Python. For starters, save this as a file named `time`:: +Ok, let's get going with an example. We'll use Python. For starters, save this as a file named `timetest.py`:: #!/usr/bin/python @@ -59,13 +67,13 @@ Testing Modules There's a useful test script in the source checkout for ansible:: - git clone git@github.com:ansible/ansible.git + git clone git@github.com:ansible/ansible.git --recursive source ansible/hacking/env-setup chmod +x ansible/hacking/test-module Let's run the script you just wrote with that:: - ansible/hacking/test-module -m ./time + ansible/hacking/test-module -m ./timetest.py You should see output that looks something like this:: @@ -78,6 +86,7 @@ If you did not, you might have a typo in your module, so recheck it and try agai Reading Input ````````````` + Let's modify the module to allow setting the current time. We'll do this by seeing if a key value pair in the form `time=` is passed in to the module. @@ -222,7 +231,7 @@ As mentioned, if you are writing a module in Python, there are some very powerfu Modules are still transferred as one file, but an arguments file is no longer needed, so these are not only shorter in terms of code, they are actually FASTER in terms of execution time. -Rather than mention these here, the best way to learn is to read some of the `source of the modules `_ that come with Ansible. +Rather than mention these here, the best way to learn is to read some of the `source of the modules `_ that come with Ansible. The 'group' and 'user' modules are reasonably non-trivial and showcase what this looks like. @@ -253,7 +262,7 @@ And failures are just as simple (where 'msg' is a required parameter to explain module.fail_json(msg="Something fatal happened") -There are also other useful functions in the module class, such as module.md5(path). See +There are also other useful functions in the module class, such as module.sha1(path). See lib/ansible/module_common.py in the source checkout for implementation details. Again, modules developed this way are best tested with the hacking/test-module script in the git @@ -300,8 +309,7 @@ You should also never do this in a module:: print "some status message" -Because the output is supposed to be valid JSON. Except that's not quite true, -but we'll get to that later. +Because the output is supposed to be valid JSON. Modules must not output anything on standard error, because the system will merge standard out with standard error and prevent the JSON from parsing. Capturing standard @@ -334,7 +342,7 @@ and guidelines: * If packaging modules in an RPM, they only need to be installed on the control machine and should be dropped into /usr/share/ansible. This is entirely optional and up to you. -* Modules should return JSON or key=value results all on one line. JSON is best if you can do JSON. All return types must be hashes (dictionaries) although they can be nested. Lists or simple scalar values are not supported, though they can be trivially contained inside a dictionary. 
+* Modules should output valid JSON only. All return types must be hashes (dictionaries) although they can be nested. Lists or simple scalar values are not supported, though they can be trivially contained inside a dictionary. * In the event of failure, a key of 'failed' should be included, along with a string explanation in 'msg'. Modules that raise tracebacks (stacktraces) are generally considered 'poor' modules, though Ansible can deal with these returns and will automatically convert anything unparseable into a failed result. If you are using the AnsibleModule common Python code, the 'failed' element will be included for you automatically when you call 'fail_json'. @@ -342,21 +350,6 @@ and guidelines: * As results from many hosts will be aggregated at once, modules should return only relevant output. Returning the entire contents of a log file is generally bad form. -.. _module_dev_shorthand: - -Shorthand Vs JSON -````````````````` - -To make it easier to write modules in bash and in cases where a JSON -module might not be available, it is acceptable for a module to return -key=value output all on one line, like this. The Ansible parser -will know what to do:: - - somekey=1 somevalue=2 rc=3 favcolor=red - -If you're writing a module in Python or Ruby or whatever, though, returning -JSON is probably the simplest way to go. - .. _module_documenting: Documenting Your Module ``````````````````````` @@ -393,7 +386,7 @@ support formatting with some special macros. These formatting functions are ``U()``, ``M()``, ``I()``, and ``C()`` for URL, module, italic, and constant-width respectively. It is suggested to use ``C()`` for file and option names, and ``I()`` when referencing -parameters; module names should be specifies as ``M(module)``. +parameters; module names should be specified as ``M(module)``. Examples (which typically contain colons, quotes, etc.) are difficult to format with YAML, so these must be @@ -423,20 +416,55 @@ built and appear in the 'docsite/' directory. .. tip:: - You can use ANSIBLE_KEEP_REMOTE_FILES=1 to prevent ansible from + You can set the environment variable ANSIBLE_KEEP_REMOTE_FILES=1 on the controlling host to prevent ansible from deleting the remote files so you can debug your module. .. _module_contribution: -Getting Your Module Into Core -````````````````````````````` +Module Paths +```````````` + +If you are having trouble getting your module "found" by ansible, be sure it is in a directory on the ANSIBLE_LIBRARY path. + +If you have a fork of one of the ansible module projects, do something like this:: + + ANSIBLE_LIBRARY=~/ansible-modules-core:~/ansible-modules-extras + +This will make the items in your fork load ahead of what ships with Ansible. Just be sure +you're not reporting bugs on versions from your fork! + +To be safe, if you're working on a variant of something in Ansible's normal distribution, it's not +a bad idea to give it a new name while you are working on it, to be sure you know you're pulling +your version. + +Getting Your Module Into Ansible +```````````````````````````````` High-quality modules with minimal dependencies -can be included in the core, but core modules (just due to the programming +can be included in Ansible, but modules (just due to the programming preferences of the developers) will need to be implemented in Python and use the AnsibleModule common code, and should generally use consistent arguments with the rest of the program. 
Stop by the mailing list to inquire about requirements if you like, and submit
-a github pull request to the main project.
+a github pull request to the `extras `_ project.
+Included modules will ship with ansible, and also have a chance to be promoted to 'core' status, which
+gives them slightly higher development priority (though they'll work in exactly the same way).
+
+
+Deprecating and making module aliases
+``````````````````````````````````````
+
+Starting in 1.8 you can deprecate modules by renaming them with a preceding _, i.e. old_cloud.py to
+_old_cloud.py. This will keep the module available but hide it from the primary docs and listing.
+
+You can also rename modules and keep an alias to the old name by using a symlink that starts with _.
+This example allows the stat module to be called with fileinfo, making the following examples equivalent::
+
+ EXAMPLES = '''
+ ln -s stat.py _fileinfo.py
+ ansible -m stat -a "path=/tmp" localhost
+ ansible -m fileinfo -a "path=/tmp" localhost
+ '''
+
.. seealso:: diff --git a/docsite/rst/developing_plugins.rst b/docsite/rst/developing_plugins.rst index e758644359f..a54e8830f21 100644 --- a/docsite/rst/developing_plugins.rst +++ b/docsite/rst/developing_plugins.rst @@ -30,7 +30,7 @@ Lookup Plugins Language constructs like "with_fileglob" and "with_items" are implemented via lookup plugins. Just like other plugin types, you can write your own.
-More documentation on writing connection plugins is pending, though you can jump into `lib/ansible/runner/lookup_plugins `_ and figure
+More documentation on writing lookup plugins is pending, though you can jump into `lib/ansible/runner/lookup_plugins `_ and figure
things out pretty easily. .. _developing_vars_plugins: @@ -42,7 +42,7 @@ Playbook constructs like 'host_vars' and 'group_vars' work via 'vars' plugins. data into ansible runs that did not come from an inventory, playbook, or command line. Note that variables can also be returned from inventory, so in most cases, you won't need to write or understand vars_plugins.
-More documentation on writing connection plugins is pending, though you can jump into `lib/ansible/inventory/vars_plugins `_ and figure
+More documentation on writing vars plugins is pending, though you can jump into `lib/ansible/inventory/vars_plugins `_ and figure
things out pretty easily. If you find yourself wanting to write a vars_plugin, it's more likely you should write an inventory script instead. diff --git a/docsite/rst/developing_test_pr.rst b/docsite/rst/developing_test_pr.rst index e966c62c1c5..ee4520bf6c9 100644 --- a/docsite/rst/developing_test_pr.rst +++ b/docsite/rst/developing_test_pr.rst @@ -29,20 +29,32 @@ and then commenting on that particular issue on GitHub. Here's how: or Docker for this, but they are optional. It is also useful to have virtual machines of different Linux or other flavors, since some features (apt vs. yum, for example) are specific to those OS versions.
-First, you will need to configure your testing environment with the neccessary tools required to run our test
+First, you will need to configure your testing environment with the necessary tools required to run our test
suites.
You will need at least::
 git
- python-nosetests
+ python-nosetests (sometimes named python-nose)
+ python-passlib
+
+If you want to run the full integration test suite you'll also need the following packages installed::
+
+ svn
+ hg
+ python-pip
+ gem
Second, if you haven't already, clone the Ansible source code from GitHub::
- git clone https://github.com/ansible/ansible.git
+ git clone https://github.com/ansible/ansible.git --recursive
 cd ansible/
.. note:: If you have previously forked the repository on GitHub, you could also clone it from there.
+.. note::
+ If updating your repo for testing something module-related, use "git rebase origin/devel" and then "git submodule update" to fetch
+ the latest development versions of modules. Skipping the "git submodule update" step will result in stale module versions.
+
Activating The Source Checkout ++++++++++++++++++++++++++++++ diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 0065fd8a8f2..2929292b087 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -20,21 +20,21 @@ The ansible-galaxy command line tool The command line ansible-galaxy has many different subcommands. Installing Roles
-++++++++++++++++
+----------------
The most obvious is downloading roles from the Ansible Galaxy website:: ansible-galaxy install username.rolename Building out Role Scaffolding
-+++++++++++++++++++++++++++++
+-----------------------------
It can also be used to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires:: ansible-galaxy init rolename Installing Multiple Roles From A File
-+++++++++++++++++++++++++++++++++++++
+-------------------------------------
To install multiple roles, the ansible-galaxy CLI can be fed a requirements file. All versions of ansible allow the following syntax for installing roles from the Ansible Galaxy website::
We are in the process of adding more examples about all of the EC2 modules
- and how they work together. There's also an ec2 example in the language_features directory of `the ansible-examples github repository `_ that you may wish to consult. Once complete, there will also be new examples of ec2 in ansible-examples.
-
-Ansible contains a number of core modules for interacting with Amazon Web Services (AWS). These also work with Eucalyptus, which is an AWS compatible private cloud solution. There are other supported cloud types, but this documentation chapter is about AWS API clouds. The purpose of this
+Ansible contains a number of modules for controlling Amazon Web Services (AWS). The purpose of this
section is to explain how to put Ansible modules together (and use inventory scripts) to use Ansible in AWS context.
-Requirements for the AWS modules are minimal. All of the modules require and are tested against boto 2.5 or higher. You'll need this Python module installed on the execution host. If you are using Red Hat Enterprise Linux or CentOS, install boto from `EPEL `_:
+Requirements for the AWS modules are minimal.
-.. code-block:: bash
+All of the modules require and are tested against recent versions of boto. You'll need this Python module installed on your control machine. Boto can be installed from your OS distribution or python's "pip install boto".
- $ yum install python-boto
+Whereas classically ansible will execute tasks in its host loop against multiple remote machines, most cloud-control steps occur on your local machine with reference to the regions to control.
-You can also install it via pip if you want.
-
-The following steps will often execute outside the host loop, so it makes sense to add localhost to inventory. Ansible
-may not require this step in the future::
-
- [local]
- localhost
-
-And in your playbook steps we'll typically be using the following pattern for provisioning steps::
+In your playbook steps we'll typically be using the following pattern for provisioning steps::
 - hosts: localhost
 connection: local
 gather_facts: False
+ tasks:
+ - ...
+
+.. _aws_authentication:
+
+Authentication
+``````````````
+
+Authentication with the AWS-related modules is handled by either
+specifying your access and secret key as ENV variables or module arguments.
+
+For environment variables::
+
+ export AWS_ACCESS_KEY_ID='AK123'
+ export AWS_SECRET_ACCESS_KEY='abc123'
+
+For storing these in a vars_file, ideally encrypted with ansible-vault::
+
+ ---
+ ec2_access_key: "--REMOVED--"
+ ec2_secret_key: "--REMOVED--"
.. _aws_provisioning: Provisioning ````````````
-The ec2 module provides the ability to provision instances within EC2. Typically the provisioning task will be performed against your Ansible master server in a play that operates on localhost using the ``local`` connection type. If you are doing an EC2 operation mid-stream inside a regular play operating on remote hosts, you may want to use the ``local_action`` keyword for that particular task. Read :doc:`playbooks_delegation` for more about local actions.
+The ec2 module provisions and de-provisions instances within EC2.
-.. note::
+An example of making sure there are only 5 instances tagged 'Demo' in EC2 follows.
- Authentication with the AWS-related modules is handled by either
- specifying your access and secret key as ENV variables or passing
- them as module arguments.
+In the example below, the "exact_count" of instances is set to 5.
This means if there are 0 instances already existing, then
+5 new instances would be created. If there were 2 instances, only 3 would be created, and if there were 8 instances, 3 instances would
+be terminated.
-.. note::
+What is being counted is specified by the "count_tag" parameter. The parameter "instance_tags" is used to apply tags to the newly created
+instance::
- To talk to specific endpoints, the environmental variable EC2_URL
- can be set. This is useful if using a private cloud like Eucalyptus,
- exporting the variable as EC2_URL=https://myhost:8773/services/Eucalyptus.
- This can be set using the 'environment' keyword in Ansible if you like.
+ # demo_setup.yml
-Here is an example of provisioning a number of instances in ad-hoc mode:
+ - hosts: localhost
+ connection: local
+ gather_facts: False
-.. code-block:: bash
+ tasks:
- # ansible localhost -m ec2 -a "image=ami-6e649707 instance_type=m1.large keypair=mykey group=webservers wait=yes" -c local
+ - name: Provision a set of instances
+ ec2:
+ key_name: my_key
+ group: test
+ instance_type: t2.micro
+ image: "{{ ami_id }}"
+ wait: true
+ exact_count: 5
+ count_tag:
+ Name: Demo
+ instance_tags:
+ Name: Demo
+ register: ec2
-In a play, this might look like (assuming the parameters are held as vars)::
+The data about what instances are created is being saved by the "register" keyword in the variable named "ec2".
+
+From this, we'll use the add_host module to dynamically create a host group consisting of these new instances. This facilitates performing configuration actions on the hosts immediately in a subsequent task::
+
+ # demo_setup.yml
+
+ - hosts: localhost
+ connection: local
+ gather_facts: False
+
+ tasks:
+
+ - name: Provision a set of instances
+ ec2:
+ key_name: my_key
+ group: test
+ instance_type: t2.micro
+ image: "{{ ami_id }}"
+ wait: true
+ exact_count: 5
+ count_tag:
+ Name: Demo
+ instance_tags:
+ Name: Demo
+ register: ec2
+
+ - name: Add all instance public IPs to host group
+ add_host: hostname={{ item.public_ip }} groupname=ec2hosts
+ with_items: ec2.instances
+
+With the host group now created, a second play at the bottom of the same provisioning playbook file might now have some configuration steps::
+
+ # demo_setup.yml
- tasks:
 - name: Provision a set of instances
- ec2: >
- keypair={{mykeypair}}
- group={{security_group}}
- instance_type={{instance_type}}
- image={{image}}
- wait=true
- count={{number}}
- register: ec2
+ hosts: localhost
+ # ... AS ABOVE ...
-
-By registering the return its then possible to dynamically create a host group consisting of these new instances. This facilitates performing configuration actions on the hosts immediately in a subsequent task::
-
- - name: Add all instance public IPs to host group
- add_host: hostname={{ item.public_ip }} groupname=ec2hosts
- with_items: ec2.instances
-With the host group now created, a second play in your provision playbook might now have some configuration steps::
-
- - name: Configuration play
- hosts: ec2hosts
+ - hosts: ec2hosts
+ name: configuration play
 user: ec2-user
 gather_facts: true
 tasks:
- - name: Check NTP service
- service: name=ntpd state=started
-Rather than include configuration inline, you may also choose to just do it as a task include or a role.
-
-The method above ties the configuration of a host with the provisioning step. This isn't always ideal and leads us onto the next section.
-
-.. _aws_advanced:
-
-Advanced Usage
-``````````````
+ - name: Check NTP service
+ service: name=ntpd state=started
.. _aws_host_inventory: Host Inventory
-++++++++++++++
+``````````````
-Once your nodes are spun up, you'll probably want to talk to them again. The best way to handle this is to use the ec2 inventory plugin.
+Once your nodes are spun up, you'll probably want to talk to them again. With a cloud setup, it's best to not maintain a static list of cloud hostnames
+in text files. Rather, the best way to handle this is to use the ec2 dynamic inventory script.
-Even for larger environments, you might have nodes spun up from Cloud Formations or other tooling. You don't have to use Ansible to spin up guests. Once these are created and you wish to configure them, the EC2 API can be used to return system grouping with the help of the EC2 inventory script. This script can be used to group resources by their security group or tags. Tagging is highly recommended in EC2 and can provide an easy way to sort between host groups and roles. The inventory script is documented doc:`api` section.
+This will also dynamically select nodes that were created outside of Ansible, and allow Ansible to manage them.
-You may wish to schedule a regular refresh of the inventory cache to accommodate for frequent changes in resources:
+See the :doc:`aws_example` for how to use this, then flip back over to this chapter.
-.. code-block:: bash
-
- # ./ec2.py --refresh-cache
+.. _aws_tags_and_groups:
-Put this into a crontab as appropriate to make calls from your Ansible master server to the EC2 API endpoints and gather host information. The aim is to keep the view of hosts as up-to-date as possible, so schedule accordingly. Playbook calls could then also be scheduled to act on the refreshed hosts inventory after each refresh. This approach means that machine images can remain "raw", containing no payload and OS-only. Configuration of the workload is handled entirely by Ansible.
-Tags
-++++
-
-There's a feature in the ec2 inventory script where hosts tagged with
-certain keys and values automatically appear in certain groups.
+When using the ec2 inventory script, hosts automatically appear in groups based on how they are tagged in EC2.
For instance, if a host is given the "class" tag with the value of "webserver", it will be automatically discoverable via a dynamic group like so:: @@ -128,178 +154,83 @@ it will be automatically discoverable via a dynamic group like so:: tasks: - ping
-Using this philosophy can be a great way to manage groups dynamically, without
-having to maintain separate inventory.
+Using this philosophy can be a great way to keep systems separated by the function they perform.
+
+In this example, if we wanted to define variables that are automatically applied to each machine tagged with the 'class' of 'webserver', 'group_vars'
+in ansible can be used. See :doc:`splitting_out_vars`.
+
+Similar groups are available for regions and other classifications, and can be similarly assigned variables using the same mechanism.
.. _aws_pull:
-Pull Configuration
-++++++++++++++++++
+Autoscaling with Ansible Pull
+`````````````````````````````
-For some the delay between refreshing host information and acting on that host information (i.e. running Ansible tasks against the hosts) may be too long. This may be the case in such scenarios where EC2 AutoScaling is being used to scale the number of instances as a result of a particular event.
Such an event may require that hosts come online and are configured as soon as possible (even a 1 minute delay may be undesirable). Its possible to pre-bake machine images which contain the necessary ansible-pull script and components to pull and run a playbook via git. The machine images could be configured to run ansible-pull upon boot as part of the bootstrapping procedure.
+Amazon Autoscaling features automatically increase or decrease capacity based on load. There are also Ansible modules shown in the cloud documentation that
+can configure autoscaling policy.
+
+When nodes come online, it may not be sufficient to wait for the next cycle of an ansible command to come along and configure that node.
+
+To do this, pre-bake machine images which contain the necessary ansible-pull invocation. Ansible-pull is a command line tool that fetches a playbook from a git server and runs it locally.
+
+One of the challenges of this approach is that there needs to be a centralized way to store data about the results of pull commands in an autoscaling context.
+For this reason, the autoscaling solution provided in the next section can be a better approach.
Read :ref:`ansible-pull` for more information on pull-mode playbooks.
-(Various developments around Ansible are also going to make this easier in the near future. Stay tuned!)
-
.. _aws_autoscale: Autoscaling with Ansible Tower
-++++++++++++++++++++++++++++++
+``````````````````````````````
:doc:`tower` also contains a very nice feature for auto-scaling use cases. In this mode, a simple curl script can call a defined URL and the server will "dial out" to the requester and configure an instance that is spinning up. This can be a great way
-to reconfigure ephemeral nodes. See the Tower documentation for more details. Click on the Tower link in the sidebar for details.
+to reconfigure ephemeral nodes. See the Tower install and product documentation for more details.
A benefit of using the callback in Tower over pull mode is that job results are still centrally recorded and less information has to be shared with remote hosts.
-.. _aws_use_cases:
-
-Use Cases
-`````````
-
-This section covers some usage examples built around a specific use case.
-
.. _aws_cloudformation_example:
-Example 1
-+++++++++
+Ansible With (And Versus) CloudFormation
+````````````````````````````````````````
- Example 1: I'm using CloudFormation to deploy a specific infrastructure stack. I'd like to manage configuration of the instances with Ansible.
+CloudFormation is an Amazon technology for defining a cloud stack as a JSON document.
-Provision instances with your tool of choice and consider using the inventory plugin to group hosts based on particular tags or security group. Consider tagging instances you wish to managed with Ansible with a suitably unique key=value tag.
+Ansible modules provide an easier-to-use interface than CloudFormation in many examples, without defining a complex JSON document.
+This is recommended for most users.
-.. note:: Ansible also has a cloudformation module you may wish to explore.
+However, for users who have decided to use CloudFormation, there is an Ansible module that can be used to apply a CloudFormation template
+to Amazon.
-.. _aws_autoscale_example:
+When using Ansible with CloudFormation, typically Ansible will be used with a tool like Packer to build images, and CloudFormation will launch
+those images, or ansible will be invoked through user data once the image comes online, or a combination of the two.
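As a rough sketch of what applying a template with the cloudformation module can look like (the stack name, region, template path, and parameter names below are illustrative assumptions, not values from this guide)::

    - name: apply a CloudFormation template
      cloudformation:
        stack_name: demo-stack              # assumed stack name
        state: present
        region: us-east-1                   # assumed region
        template: files/demo-stack.json     # assumed path to an existing template
        template_parameters:
          KeyName: my_key                   # parameter names depend on your template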
-Example 2
-+++++++++
+Please see the examples in the Ansible CloudFormation module for more details.
- Example 2: I'm using AutoScaling to dynamically scale up and scale down the number of instances. This means the number of hosts is constantly fluctuating but I'm letting EC2 automatically handle the provisioning of these instances. I don't want to fully bake a machine image, I'd like to use Ansible to configure the hosts.
+.. _aws_image_build:
-There are several approaches to this use case. The first is to use the inventory plugin to regularly refresh host information and then target hosts based on the latest inventory data. The second is to use ansible-pull triggered by a user-data script (specified in the launch configuration) which would then mean that each instance would fetch Ansible and the latest playbook from a git repository and run locally to configure itself. You could also use the Tower callback feature.
+AWS Image Building With Ansible
+```````````````````````````````
-.. _aws_builds:
+Many users may want to have images boot to a more complete configuration rather than configuring them entirely after instantiation. To do this,
+one of many programs can be used with Ansible playbooks to define and upload a base image, which will then get its own AMI ID for usage with
+the ec2 module or other Ansible AWS modules such as ec2_asg or the cloudformation module. Possible tools include Packer, aminator, and Ansible's
+ec2_ami module.
-Example 3
-+++++++++
+Generally speaking, we find most users using Packer.
- Example 3: I don't want to use Ansible to manage my instances but I'd like to consider using Ansible to build my fully-baked machine images.
+`Documentation for the Ansible Packer provisioner can be found here `_.
-There's nothing to stop you doing this. If you like working with Ansible's playbook format then writing a playbook to create an image; create an image file with dd, give it a filesystem and then install packages and finally chroot into it for further configuration. Ansible has the 'chroot' plugin for this purpose, just add the following to your inventory file::
+If you do not want to adopt Packer at this time, configuring a base-image with Ansible after provisioning (as shown above) is acceptable.
- /chroot/path ansible_connection=chroot
+.. _aws_next_steps:
-And in your playbook::
-
- hosts: /chroot/path
-Example 4
-+++++++++
- How would I create a new ec2 instance, provision it and then destroy it all in the same play?
-.. code-block:: yaml
-
- # Use the ec2 module to create a new host and then add
- # it to a special "ec2hosts" group.
- - - hosts: localhost - connection: local - gather_facts: False - vars: - ec2_access_key: "--REMOVED--" - ec2_secret_key: "--REMOVED--" - keypair: "mykeyname" - instance_type: "t1.micro" - image: "ami-d03ea1e0" - group: "mysecuritygroup" - region: "us-west-2" - zone: "us-west-2c" - tasks: - - name: make one instance - ec2: image={{ image }} - instance_type={{ instance_type }} - aws_access_key={{ ec2_access_key }} - aws_secret_key={{ ec2_secret_key }} - keypair={{ keypair }} - instance_tags='{"foo":"bar"}' - region={{ region }} - group={{ group }} - wait=true - register: ec2_info - - - debug: var=ec2_info - - debug: var=item - with_items: ec2_info.instance_ids - - - add_host: hostname={{ item.public_ip }} groupname=ec2hosts - with_items: ec2_info.instances - - - name: wait for instances to listen on port:22 - wait_for: - state=started - host={{ item.public_dns_name }} - port=22 - with_items: ec2_info.instances - - - # Connect to the node and gather facts, - # including the instance-id. These facts - # are added to inventory hostvars for the - # duration of the playbook's execution - # Typical "provisioning" tasks would go in - # this playbook. - - - hosts: ec2hosts - gather_facts: True - user: ec2-user - sudo: True - tasks: - - # fetch instance data from the metadata servers in ec2 - - ec2_facts: - - # show all known facts for this host - - debug: var=hostvars[inventory_hostname] - - # just show the instance-id - - debug: msg="{{ hostvars[inventory_hostname]['ansible_ec2_instance_id'] }}" - - - # Using the instanceid, call the ec2 module - # locally to remove the instance by declaring - # it's state is "absent" - - - hosts: ec2hosts - gather_facts: True - connection: local - vars: - ec2_access_key: "--REMOVED--" - ec2_secret_key: "--REMOVED--" - region: "us-west-2" - tasks: - - name: destroy all instances - ec2: state='absent' - aws_access_key={{ ec2_access_key }} - aws_secret_key={{ ec2_secret_key }} - region={{ region }} - instance_ids={{ item }} - wait=true - with_items: hostvars[inventory_hostname]['ansible_ec2_instance_id'] - - -.. note:: more examples of this are pending. You may also be interested in the ec2_ami module for taking AMIs of running instances. - -.. _aws_pending: - -Pending Information -``````````````````` - -In the future look here for more topics. +Next Steps: Explore Modules +``````````````````````````` +Ansible ships with lots of modules for configuring a wide array of EC2 services. Browse the "Cloud" category of the module +documentation for a full list with examples. .. seealso:: @@ -309,7 +240,7 @@ In the future look here for more topics. An introduction to playbooks :doc:`playbooks_delegation` Delegation, useful for working with loud balancers, clouds, and locally executed steps. - `User Mailing List `_ + `User Mailing List `_ Have a question? Stop by the google group! `irc.freenode.net `_ #ansible IRC chat channel diff --git a/docsite/rst/guide_gce.rst b/docsite/rst/guide_gce.rst index f9e498ac0aa..c0f90f13e26 100644 --- a/docsite/rst/guide_gce.rst +++ b/docsite/rst/guide_gce.rst @@ -22,7 +22,7 @@ The GCE modules all require the apache-libcloud module, which you can install fr Credentials ----------- -To work with the GCE modules, you'll first need to get some credentials. You can create new one from the `console `_ by going to the "APIs and Auth" section. 
Once you've created a new client ID and downloaded the generated private key (in the `pkcs12 format `_), you'll need to convert the key by running the following command:
+To work with the GCE modules, you'll first need to get some credentials. You can create a new one from the `console `_ by going to the "APIs and Auth" section and choosing to create a new client ID for a service account. Once you've created a new client ID and downloaded the generated private key (in the `pkcs12 format `_), you'll need to convert the key by running the following command:
.. code-block:: bash @@ -133,18 +133,18 @@ For the following use case, let's use this small shell script as a wrapper. .. code-block:: bash
- #!/bin/bash
+ #!/usr/bin/env bash
 PLAYBOOK="$1"
- if [ -z $PLAYBOOK ]; then
- echo "You need to pass a playback as argument to this script."
+ if [[ -z $PLAYBOOK ]]; then
+ echo "You need to pass a playbook as argument to this script."
 exit 1
 fi
 export SSL_CERT_FILE=$(pwd)/cacert.cer
 export ANSIBLE_HOST_KEY_CHECKING=False
- if [ ! -f "$SSL_CERT_FILE" ]; then
+ if [[ ! -f "$SSL_CERT_FILE" ]]; then
 curl -O http://curl.haxx.se/ca/cacert.pem
 fi
@@ -175,11 +175,11 @@ A playbook would looks like this: tasks: - name: Launch instances gce:
- instance_names: dev
- machine_type: "{{ machine_type }}"
- image: "{{ image }}"
- service_account_email: "{{ service_account_email }}"
- pem_file: "{{ pem_file }}"
+ instance_names: dev
+ machine_type: "{{ machine_type }}"
+ image: "{{ image }}"
+ service_account_email: "{{ service_account_email }}"
+ pem_file: "{{ pem_file }}"
 project_id: "{{ project_id }}"
 tags: webserver
 register: gce
@@ -188,15 +188,18 @@ A playbook would looks like this: wait_for: host={{ item.public_ip }} port=22 delay=10 timeout=60 with_items: gce.instance_data
- - name: add_host hostname={{ item.public_ip }} groupname=new_instances
+ - name: Add host to groupname
+ add_host: hostname={{ item.public_ip }} groupname=new_instances
+ with_items: gce.instance_data
- name: Manage new instances
 hosts: new_instances
 connection: ssh
+ sudo: True
 roles:
 - base_configuration
 - production_server
-
+
Note that use of the "add_host" module above creates a temporary, in-memory group. This means that a play in the same playbook can then manage machines in the 'new_instances' group, if so desired. Any sort of arbitrary configuration is possible at this point. diff --git a/docsite/rst/guide_rax.rst b/docsite/rst/guide_rax.rst index d00a090fa39..2a2f415e698 100644 --- a/docsite/rst/guide_rax.rst +++ b/docsite/rst/guide_rax.rst @@ -163,7 +163,7 @@ In Ansible it is quite possible to use multiple dynamic inventory plugins along rax.py ++++++
-To use the rackspace dynamic inventory script, copy ``rax.py`` into your inventory directory and make it executable. You can specify a credentails file for ``rax.py`` utilizing the ``RAX_CREDS_FILE`` environment variable.
+To use the rackspace dynamic inventory script, copy ``rax.py`` into your inventory directory and make it executable. You can specify a credentials file for ``rax.py`` utilizing the ``RAX_CREDS_FILE`` environment variable.
.. note:: Dynamic inventory scripts (like ``rax.py``) are saved in ``/usr/share/ansible/inventory`` if Ansible has been installed globally. If installed to a virtualenv, the inventory scripts are installed to ``$VIRTUALENV/share/inventory``.
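A run against hosts discovered by ``rax.py`` might then look something like the sketch below; the playbook filename and credentials path are illustrative assumptions, and the groups ``rax.py`` creates depend on your regions and metadata::

    # run with: RAX_CREDS_FILE=~/.rackspace_cloud_credentials ansible-playbook -i rax.py rax_ping.yml
    - hosts: all            # or a region/metadata group created by rax.py
      gather_facts: false
      tasks:
        - name: verify connectivity to the discovered cloud servers
          ping: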
diff --git a/docsite/rst/guide_vagrant.rst b/docsite/rst/guide_vagrant.rst index 8dc8d10b449..f61fd84feba 100644 --- a/docsite/rst/guide_vagrant.rst +++ b/docsite/rst/guide_vagrant.rst @@ -107,14 +107,16 @@ inventory file may look something like this: If you want to run Ansible manually, you will want to make sure to pass ``ansible`` or ``ansible-playbook`` commands the correct arguments for the
-username (usually ``vagrant``) and the SSH key (usually
-``~/.vagrant.d/insecure_private_key``), and the autogenerated inventory file.
+username (usually ``vagrant``) and the SSH key (since Vagrant 1.7.0, this will be something like
+``.vagrant/machines/[machine name]/[provider]/private_key``), and the autogenerated inventory file.
Here is an example: .. code-block:: bash
- $ ansible-playbook -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory --private-key=~/.vagrant.d/insecure_private_key -u vagrant playbook.yml
+ $ ansible-playbook -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory --private-key=.vagrant/machines/default/virtualbox/private_key -u vagrant playbook.yml
+
+Note: Vagrant versions prior to 1.7.0 will use the private key located at ``~/.vagrant.d/insecure_private_key``.
.. seealso:: diff --git a/docsite/rst/guru.rst b/docsite/rst/guru.rst deleted file mode 100644 index e4f07fd3478..00000000000 --- a/docsite/rst/guru.rst +++ /dev/null @@ -1,10 +0,0 @@
-Ansible Guru
-````````````
-
-While many users should be able to get on fine with the documentation, mailing list, and IRC, sometimes you want a bit more.
-
-`Ansible Guru `_ is an offering from Ansible, Inc that helps users who would like more dedicated help with Ansible, including building playbooks, best practices, architecture suggestions, and more -- all from our awesome support and services team. It also includes some useful discounts and also some free T-shirts, though you shouldn't get it just for the free shirts! It's a great way to train up to becoming an Ansible expert.
-
-For those interested, click through the link above. You can sign up in minutes!
-
-For users looking for more hands-on help, we also have some more information on our `Services page `_, and support is also included with :doc:`tower`.
diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst index b7bd7cce95b..c8d263d01ae 100644 --- a/docsite/rst/index.rst +++ b/docsite/rst/index.rst @@ -16,7 +16,7 @@ We believe simplicity is relevant to all sizes of environments and design for bu Ansible manages machines in an agentless manner. There is never a question of how to upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. As OpenSSH is one of the most peer reviewed open source components, the security exposure of using the tool is greatly reduced. Ansible is decentralized -- it relies on your existing OS credentials to control access to remote machines; if needed it can easily connect with Kerberos, LDAP, and other centralized authentication management systems.
-This documentation covers the current released version of Ansible (1.6.10) and also some development version features (1.7). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months.
The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release. +This documentation covers the current released version of Ansible (1.8.2) and also some development version features (1.9). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release. .. _an_introduction: @@ -38,5 +38,4 @@ This documentation covers the current released version of Ansible (1.6.10) and a faq glossary YAMLSyntax - guru diff --git a/docsite/rst/intro_adhoc.rst b/docsite/rst/intro_adhoc.rst index 787a7e76ee9..2978343abe0 100644 --- a/docsite/rst/intro_adhoc.rst +++ b/docsite/rst/intro_adhoc.rst @@ -154,11 +154,11 @@ with yum. Ensure a package is installed, but don't update it:: - $ ansible webservers -m yum -a "name=acme state=installed" + $ ansible webservers -m yum -a "name=acme state=present" Ensure a package is installed to a specific version:: - $ ansible webservers -m yum -a "name=acme-1.5 state=installed" + $ ansible webservers -m yum -a "name=acme-1.5 state=present" Ensure a package is at the latest version:: @@ -166,7 +166,7 @@ Ensure a package is at the latest version:: Ensure a package is not installed:: - $ ansible webservers -m yum -a "name=acme state=removed" + $ ansible webservers -m yum -a "name=acme state=absent" Ansible has modules for managing packages under many platforms. If your package manager does not have a module available for it, you can install @@ -225,16 +225,16 @@ Ensure a service is stopped:: Time Limited Background Operations `````````````````````````````````` -Long running operations can be backgrounded, and their status can be -checked on later. The same job ID is given to the same task on all -hosts, so you won't lose track. If you kick hosts and don't want -to poll, it looks like this:: +Long running operations can be backgrounded, and their status can be checked on +later. 
If you kick hosts and don't want to poll, it looks like this::
- $ ansible all -B 3600 -a "/usr/bin/long_running_operation --do-stuff"
+ $ ansible all -B 3600 -P 0 -a "/usr/bin/long_running_operation --do-stuff"
-If you do decide you want to check on the job status later, you can::
+If you do decide you want to check on the job status later, you can use the
+async_status module, passing it the job id that was returned when you ran
+the original job in the background::
- $ ansible all -m async_status -a "jid=123456789"
+ $ ansible web1.example.com -m async_status -a "jid=488359678239.2844"
Polling is built-in and looks like this:: diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 016faf5e44c..4cb1f359948 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -70,7 +70,7 @@ Actions are pieces of code in ansible that enable things like module execution, This is a developer-centric feature that allows low-level extensions around Ansible to be loaded from different locations::
- action_plugins = /usr/share/ansible_plugins/action_plugins
+ action_plugins = ~/.ansible/plugins/action_plugins/:/usr/share/ansible_plugins/action_plugins
Most users will not need to use this feature. See :doc:`developing_plugins` for more details. @@ -135,10 +135,12 @@ Prior to 1.8, callbacks were never loaded for /usr/bin/ansible. callback_plugins ================
+Callbacks are pieces of code in ansible that get called on specific events, making it possible to trigger notifications.
+
This is a developer-centric feature that allows low-level extensions around Ansible to be loaded from different locations::
- callback_plugins = /usr/share/ansible_plugins/callback_plugins
+ callback_plugins = ~/.ansible/plugins/callback_plugins/:/usr/share/ansible_plugins/callback_plugins
Most users will not need to use this feature. See :doc:`developing_plugins` for more details @@ -154,9 +156,9 @@ command module appear to be simplified by using a default Ansible module instead. This can include reminders to use the 'git' module instead of shell commands to execute 'git'. Using modules when possible over arbitrary shell commands can lead to more reliable and consistent playbook runs, and
-also easier to maintain playbooks.
+also easier to maintain playbooks::
- command_warnings=False
+ command_warnings = False
These warnings can be silenced by adjusting the following setting or adding warn=yes or warn=no to the end of the command line @@ -171,10 +173,12 @@ parameter string, like so:: connection_plugins ==================
+Connection plugins make it possible to extend the channel used by ansible to transport commands and files.
+
This is a developer-centric feature that allows low-level extensions around Ansible to be loaded from different locations::
- connection_plugins = /usr/share/ansible_plugins/connection_plugins
+ connection_plugins = ~/.ansible/plugins/connection_plugins/:/usr/share/ansible_plugins/connection_plugins
Most users will not need to use this feature. See :doc:`developing_plugins` for more details @@ -230,13 +234,24 @@ rare instances to /bin/bash in rare instances when sudo is constrained, but in m filter_plugins ==============
+Filters are specific functions that can be used to extend the template system.
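For instance, a filter transforms a value inside a template expression, as in the small sketch below (``basename`` ships with ansible's core filters; the variable name is an illustrative assumption)::

    - debug: msg="{{ my_path | basename }}"   # renders just the file name portion of my_path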
+ This is a developer-centric feature that allows low-level extensions around Ansible to be loaded from different locations::
- filter_plugins = /usr/share/ansible_plugins/filter_plugins
+ filter_plugins = ~/.ansible/plugins/filter_plugins/:/usr/share/ansible_plugins/filter_plugins
Most users will not need to use this feature. See :doc:`developing_plugins` for more details
+.. _force_color:
+
+force_color
+===========
+
+This option forces color mode even when running without a TTY::
+
+ force_color = 1
+
.. _forks: forks @@ -280,10 +295,7 @@ The valid values are either 'replace' (the default) or 'merge'. hostfile ========
-This is the default location of the inventory file, script, or directory that Ansible will use to determine what hosts it has available
-to talk to::
-
- hostfile = /etc/ansible/hosts
+This setting is deprecated since 1.9; please see :ref:`inventory` for the new setting.
.. _host_key_checking: @@ -295,6 +307,18 @@ implications and wish to disable it, you may do so here by setting the value to host_key_checking=True
+.. _inventory:
+
+inventory
+=========
+
+This is the default location of the inventory file, script, or directory that Ansible will use to determine what hosts it has available
+to talk to::
+
+ inventory = /etc/ansible/hosts
+
+It used to be called hostfile in Ansible before 1.9.
.. _jinja2_extensions: jinja2_extensions @@ -341,7 +365,7 @@ lookup_plugins This is a developer-centric feature that allows low-level extensions around Ansible to be loaded from different locations::
- lookup_plugins = /usr/share/ansible_plugins/lookup_plugins
+ lookup_plugins = ~/.ansible/plugins/lookup_plugins/:/usr/share/ansible_plugins/lookup_plugins
Most users will not need to use this feature. See :doc:`developing_plugins` for more details @@ -487,7 +511,7 @@ sudo_flags ========== Additional flags to pass to sudo when engaging sudo support. The default is '-H' which preserves the environment
-of the original user. In some situations you may wish to add or remote flags, but in general most users
+of the original user. In some situations you may wish to add or remove flags, but in general most users
will not need to change this setting:: sudo_flags=-H @@ -544,10 +568,24 @@ vars_plugins This is a developer-centric feature that allows low-level extensions around Ansible to be loaded from different locations::
- vars_plugins = /usr/share/ansible_plugins/vars_plugins
+ vars_plugins = ~/.ansible/plugins/vars_plugins/:/usr/share/ansible_plugins/vars_plugins
Most users will not need to use this feature. See :doc:`developing_plugins` for more details
+
+.. _vault_password_file:
+
+vault_password_file
+===================
+
+.. versionadded:: 1.7
+
+Configures the path to the Vault password file as an alternative to specifying ``--vault-password-file`` on the command line::
+
+ vault_password_file = /path/to/vault_password_file
+
+As of 1.7 this file can also be a script. If you are using a script instead of a flat file, ensure that it is marked as executable, and that the password is printed to standard output. If your script needs to prompt for data, prompts can be sent to standard error.
+
.. _paramiko_settings: Paramiko Specific Settings @@ -639,8 +677,8 @@ recommended if you can enable it, eliminating the need for :doc:`playbooks_accel .. _accelerate_settings:
-Accelerate Mode Settings
------------------------
+Accelerated Mode Settings
+-------------------------
Under the [accelerate] header, the following settings are tunable for :doc:`playbooks_acceleration`.
Acceleration is a useful performance feature to use if you cannot enable :ref:`pipelining` in your environment, but is probably @@ -653,7 +691,7 @@ accelerate_port .. versionadded:: 1.3 -This is the port to use for accelerate mode:: +This is the port to use for accelerated mode:: accelerate_port = 5099 diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index f8a5c92b2de..ddb452e7756 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -87,7 +87,7 @@ marking it executable:: ansible -i ec2.py -u ubuntu us-east-1d -m ping -The second option is to copy the script to `/etc/ansible/hosts` and `chmod +x` it. You will also need to copy the `ec2.ini `_ file to `/etc/ansible/ec2.ini`. Then you can run ansible as you would normally. +The second option is to copy the script to `/etc/ansible/hosts` and `chmod +x` it. You will also need to copy the `ec2.ini `_ file to `/etc/ansible/ec2.ini`. Then you can run ansible as you would normally. To successfully make an API call to AWS, you will need to configure Boto (the Python interface to AWS). There are a `variety of methods `_ available, but the simplest is just to export two environment variables:: @@ -189,7 +189,9 @@ To see the complete list of variables available for an instance, run the script ./ec2.py --host ec2-12-12-12-12.compute-1.amazonaws.com Note that the AWS inventory script will cache results to avoid repeated API calls, and this cache setting is configurable in ec2.ini. To -explicitly clear the cache, you can run the ec2.py script with the ``--refresh-cache`` parameter. +explicitly clear the cache, you can run the ec2.py script with the ``--refresh-cache`` parameter:: + + # ./ec2.py --refresh-cache .. _other_inventory_scripts: @@ -223,6 +225,26 @@ If the location given to -i in Ansible is a directory (or as so configured in an at the same time. When doing so, it is possible to mix both dynamic and statically managed inventory sources in the same ansible run. Instant hybrid cloud! +.. _static_groups_of_dynamic: + +Static Groups of Dynamic Groups +``````````````````````````````` + +When defining groups of groups in the static inventory file, the child groups +must also be defined in the static inventory file, or ansible will return an +error. If you want to define a static group of dynamic child groups, define +the dynamic groups as empty in the static inventory file. For example:: + + [tag_Name_staging_foo] + + [tag_Name_staging_bar] + + [staging:children] + tag_Name_staging_foo + tag_Name_staging_bar + + + .. seealso:: :doc:`intro_inventory` diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index 410284ab7d6..07e0c501c70 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -107,17 +107,29 @@ To install from source. .. 
code-block:: bash
- $ git clone git://github.com/ansible/ansible.git
+ $ git clone git://github.com/ansible/ansible.git --recursive
 $ cd ./ansible
 $ source ./hacking/env-setup
+If you want to suppress spurious warnings/errors, use::
+
+ $ source ./hacking/env-setup -q
+
If you don't have pip installed in your version of Python, install pip:: $ sudo easy_install pip Ansible also uses the following Python modules that need to be installed::
- $ sudo pip install paramiko PyYAML jinja2 httplib2
+ $ sudo pip install paramiko PyYAML Jinja2 httplib2
+
+Note that when updating ansible, be sure to not only update the source tree, but also the "submodules" in git
+which point at Ansible's own modules (not the same kind of modules, alas).
+
+.. code-block:: bash
+
+ $ git pull --rebase
+ $ git submodule update --init --recursive
Once running the env-setup script you'll be running from checkout and the default inventory file will be /etc/ansible/hosts. You can optionally specify an inventory file (see :doc:`intro_inventory`) @@ -194,6 +206,24 @@ You may also wish to run from source to get the latest, which is covered above. .. _from_pkg:
+Latest Releases Via Portage (Gentoo)
+++++++++++++++++++++++++++++++++++++
+
+.. code-block:: bash
+
+ $ emerge -av app-admin/ansible
+
+To install the newest version, you may need to unmask the ansible package prior to emerging:
+
+.. code-block:: bash
+
+ $ echo 'app-admin/ansible' >> /etc/portage/package.accept_keywords
+
+.. note::
+
+ If you have Python 3 as a default Python slot on your Gentoo nodes (default setting), then you
+ must set ``ansible_python_interpreter = /usr/bin/python2`` in your group or inventory variables.
+
Latest Releases Via pkg (FreeBSD) +++++++++++++++++++++++++++++++++ @@ -219,6 +249,18 @@ To install on a Mac, make sure you have Homebrew, then run: $ brew update $ brew install ansible
+.. _from_pkgutil:
+
+Latest Releases Via OpenCSW (Solaris)
++++++++++++++++++++++++++++++++++++++
+
+Ansible is available for Solaris as `SysV package from OpenCSW `_.
+
+.. code-block:: bash
+
+ # pkgadd -d http://get.opencsw.org/now
+ # /opt/csw/bin/pkgutil -i ansible
+
.. _from_pip: Latest Releases Via Pip diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index a70f2b059ca..5c38372e76b 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -19,7 +19,7 @@ pull inventory from dynamic or cloud sources, as described in :doc:`intro_dynami Hosts and Groups ++++++++++++++++
-The format for /etc/ansible/hosts is an INI format and looks like this::
+The format for /etc/ansible/hosts is an INI-like format and looks like this::
 mail.example.com
@@ -184,7 +184,7 @@ variables. Note that this only works on Ansible 1.4 or later. Tip: In Ansible 1.2 or later the group_vars/ and host_vars/ directories can exist in either the playbook directory OR the inventory directory. If both paths exist, variables in the playbook
-directory will be loaded second.
+directory will override variables set in the inventory directory.
Tip: Keeping your inventory file and variables in a git repo (or other version control) is an excellent way to track changes to your inventory and host variables. @@ -205,8 +205,12 @@ mentioned:: The default ssh user name to use. ansible_ssh_pass The ssh password to use (this is insecure, we strongly recommend using --ask-pass or SSH keys)
+ ansible_sudo
+ The boolean to decide if sudo should be used for this host. Defaults to false.
ansible_sudo_pass The sudo password to use (this is insecure, we strongly recommend using --ask-sudo-pass) + ansible_sudo_exe (new in version 1.8) + The sudo command path. ansible_connection Connection type of the host. Candidates are local, ssh or paramiko. The default is paramiko before Ansible 1.2, and 'smart' afterwards which detects whether usage of 'ssh' would be feasible based on whether ControlPersist is supported. ansible_ssh_private_key_file diff --git a/docsite/rst/intro_patterns.rst b/docsite/rst/intro_patterns.rst index 1dd1935f060..7830c97c491 100644 --- a/docsite/rst/intro_patterns.rst +++ b/docsite/rst/intro_patterns.rst @@ -68,6 +68,14 @@ It's also ok to mix wildcard patterns and groups at the same time:: one*.com:dbservers +As an advanced usage, you can also select the numbered server in a group:: + + webservers[0] + +Or a portion of servers in a group:: + + webservers[0:25] + Most people don't specify patterns as regular expressions, but you can. Just start the pattern with a '~':: ~(web|db).*\.example\.com @@ -76,6 +84,10 @@ While we're jumping a bit ahead, additionally, you can add an exclusion criteria ansible-playbook site.yml --limit datacenter2 +And if you want to read the list of hosts from a file, prefix the file name with '@'. Since Ansible 1.2:: + + ansible-playbook site.yml --limit @retry_hosts.txt + Easy enough. See :doc:`intro_adhoc` and then :doc:`playbooks` for how to apply this knowledge. .. seealso:: diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index 7774a6ce0c9..262fb7f0f03 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -45,7 +45,7 @@ In group_vars/windows.yml, define the following inventory variables:: # ansible-vault edit group_vars/windows.yml ansible_ssh_user: Administrator - ansible_ssh_pass: SekritPasswordGoesHere + ansible_ssh_pass: SecretPasswordGoesHere ansible_ssh_port: 5986 ansible_connection: winrm diff --git a/docsite/rst/playbooks_acceleration.rst b/docsite/rst/playbooks_acceleration.rst index b7f08828a84..40b77246db8 100644 --- a/docsite/rst/playbooks_acceleration.rst +++ b/docsite/rst/playbooks_acceleration.rst @@ -6,24 +6,24 @@ Accelerated Mode You Might Not Need This! ```````````````````````` -Are you running Ansible 1.5 or later? If so, you may not need accelerate mode due to a new feature called "SSH pipelining" and should read the :ref:`pipelining` section of the documentation. +Are you running Ansible 1.5 or later? If so, you may not need accelerated mode due to a new feature called "SSH pipelining" and should read the :ref:`pipelining` section of the documentation. -For users on 1.5 and later, accelerate mode only makes sense if you (A) are managing from an Enterprise Linux 6 or earlier host +For users on 1.5 and later, accelerated mode only makes sense if you (A) are managing from an Enterprise Linux 6 or earlier host and still are on paramiko, or (B) can't enable TTYs with sudo as described in the pipelining docs. If you can use pipelining, Ansible will reduce the amount of files transferred over the wire, -making everything much more efficient, and performance will be on par with accelerate mode in nearly all cases, possibly excluding very large file transfer. Because less moving parts are involved, pipelining is better than accelerate mode for nearly all use cases. +making everything much more efficient, and performance will be on par with accelerated mode in nearly all cases, possibly excluding very large file transfer. 
Because fewer moving parts are involved, pipelining is better than accelerated mode for nearly all use cases.
-Accelerate mode remains around in support of EL6
+Accelerated mode remains around in support of EL6
control machines and other constrained environments.
-Accelerate Mode Details
-```````````````````````
+Accelerated Mode Details
+````````````````````````
While OpenSSH using the ControlPersist feature is quite fast and scalable, there is a certain small amount of overhead involved in using SSH connections. While many people will not encounter a need, if you are running on a platform that doesn't have ControlPersist support (such as an EL6 control machine), you'll probably be even more interested in tuning options.
-Accelerate mode is there to help connections work faster, but still uses SSH for initial secure key exchange. There is no
+Accelerated mode is there to help connections work faster, but still uses SSH for initial secure key exchange. There is no
additional public key infrastructure to manage, and this does not require things like NTP or even DNS. Accelerated mode can be anywhere from 2-6x faster than SSH with ControlPersist enabled, and 10x faster than paramiko. diff --git a/docsite/rst/playbooks_async.rst b/docsite/rst/playbooks_async.rst index b1636d13f65..7b72846fd97 100644 --- a/docsite/rst/playbooks_async.rst +++ b/docsite/rst/playbooks_async.rst @@ -56,6 +56,28 @@ Alternatively, if you do not need to wait on the task to complete, you may Using a higher value for ``--forks`` will result in kicking off asynchronous tasks even faster. This also increases the efficiency of polling.
+If you would like to perform a variation of the "fire and forget" where you
+"fire and forget, check on it later" you can perform a task similar to the
+following::
+
+ ---
+ # Requires ansible 1.8+
+ - name: 'YUM - fire and forget task'
+ yum: name=docker-io state=installed
+ async: 1000
+ poll: 0
+ register: yum_sleeper
+
+ - name: 'YUM - check on fire and forget task'
+ async_status: jid={{ yum_sleeper.ansible_job_id }}
+ register: job_result
+ until: job_result.finished
+ retries: 30
+
+.. note::
+ If the value of ``async:`` is not high enough, this will cause the
+ "check on it later" task to fail because the temporary status file that
+ the ``async_status:`` is looking for will not have been written.
.. seealso:: diff --git a/docsite/rst/playbooks_best_practices.rst b/docsite/rst/playbooks_best_practices.rst index 7eee9cc68a7..43c642d583c 100644 --- a/docsite/rst/playbooks_best_practices.rst +++ b/docsite/rst/playbooks_best_practices.rst @@ -1,7 +1,7 @@ Best Practices ==============
-Here are some tips for making the most of Ansible playbooks.
+Here are some tips for making the most of Ansible and Ansible playbooks.
You can find some example playbooks illustrating these best practices in our `ansible-examples repository `_. (NOTE: These may not use all of the features in the latest release, but are still an excellent reference!). @@ -12,10 +12,13 @@ You can find some example playbooks illustrating these best practices in our `an Content Organization ++++++++++++++++++++++
-The following section shows one of many possible ways to organize playbook content. Your usage of Ansible should fit your needs, however, not ours, so feel free to modify this approach and organize as you see fit.
+The following section shows one of many possible ways to organize playbook content.
-(One thing you will definitely want to do though, is use the "roles" organization feature, which is documented as part
-of the main playbooks page. See :doc:`playbooks_roles`).
+Your usage of Ansible should fit your needs, however, not ours, so feel free to modify this approach and organize as you see fit.
+
+One thing you will definitely want to do though, is use the "roles" organization feature, which is documented as part
+of the main playbooks page. See :doc:`playbooks_roles`. You absolutely should be using roles. Roles are great. Use roles. Roles!
+Did we say that enough? Roles are great.
.. _directory_layout: @@ -34,6 +37,9 @@ The top level of the directory would contain files and directories like so:: hostname1 # if systems need specific variables, put them here hostname2 # ""
+ library/ # if any custom modules, put them here (optional)
+ filter_plugins/ # if any custom filter plugins, put them here (optional)
+
 site.yml # master playbook
 webservers.yml # playbook for webserver tier
 dbservers.yml # playbook for dbserver tier
@@ -51,6 +57,8 @@ The top level of the directory would contain files and directories like so:: foo.sh # <-- script files for use with the script resource vars/ # main.yml # <-- variables associated with this role
+ defaults/ #
+ main.yml # <-- default lower priority variables for this role
 meta/ #
 main.yml # <-- role dependencies
@@ -58,12 +66,28 @@ The top level of the directory would contain files and directories like so:: monitoring/ # "" fooapp/ # ""
+.. note:: If you find yourself having too many top level playbooks (for instance you have a playbook you wrote for a specific hotfix, etc), it may make sense to have a playbooks/ directory instead. This can be a good idea as you get larger. If you do this, configure your roles_path in ansible.cfg to find your roles location.
+
+.. _use_dynamic_inventory_with_clouds:
+
+Use Dynamic Inventory With Clouds
+`````````````````````````````````
+
+If you are using a cloud provider, you should not be managing your inventory in a static file. See :doc:`intro_dynamic_inventory`.
+
+This does not just apply to clouds -- if you have another system maintaining a canonical list of systems
+in your infrastructure, usage of dynamic inventory is a great idea in general.
+
.. _stage_vs_prod:
-How to Arrange Inventory, Stage vs Production
-`````````````````````````````````````````````
+How to Differentiate Stage vs Production
+`````````````````````````````````````````
-In the example below, the *production* file contains the inventory of all of your production hosts. Of course you can pull inventory from an external data source as well, but this is just a basic example.
+If managing static inventory, a frequently asked question is how to differentiate different types of environments. The following example
+shows a good way to do this. Similar methods of grouping could be adapted to dynamic inventory (for instance, consider applying the AWS
+tag "environment:production", and you'll get a group of systems automatically discovered named "ec2_tag_environment_production").
+
+Let's show a static inventory example though. Below, the *production* file contains the inventory of all of your production hosts.
It is suggested that you define groups based on purpose of the host (roles) and also geography or datacenter location (if applicable):: @@ -104,13 +128,14 @@ It is suggested that you define groups based on purpose of the host (roles) and boston-webservers boston-dbservers
-
.. _groups_and_hosts: Group And Host Variables ```````````````````` -Now, groups are nice for organization, but that's not all groups are good for. You can also assign variables to them! For instance, atlanta has its own NTP servers, so when setting up ntp.conf, we should use them. Let's set those now:: +This section builds on the previous example. + +Groups are nice for organization, but that's not all groups are good for. You can also assign variables to them! For instance, atlanta has its own NTP servers, so when setting up ntp.conf, we should use them. Let's set those now:: --- # file: group_vars/atlanta @@ -138,6 +163,9 @@ We can define specific hardware variance in systems in a host_vars file, but avo foo_agent_port: 86 bar_agent_port: 99 +Again, if we are using dynamic inventory sources, many dynamic groups are automatically created. So a tag like "class:webserver" would load in +variables from the file "group_vars/ec2_tag_class_webserver" automatically. + .. _split_by_role: Top Level Playbooks Are Separated By Role @@ -160,6 +188,12 @@ In a file like webservers.yml (also at the top level), we simply map the configu - common - webtier +The idea here is that we can choose to configure our whole infrastructure by "running" site.yml or we could just choose to run a subset by running +webservers.yml. This is analogous to the "--limit" parameter to ansible but a little more explicit:: + + ansible-playbook site.yml --limit webservers + ansible-playbook webservers.yml + .. _role_organization: Task And Handler Organization For A Role @@ -284,7 +318,7 @@ parameter in your playbooks to make it clear, especially as some modules support Group By Roles ++++++++++++++ -A system can be in multiple groups. See :doc:`intro_inventory` and :doc:`intro_patterns`. Having groups named after things like +We're somewhat repeating ourselves with this tip, but it's worth repeating. A system can be in multiple groups. See :doc:`intro_inventory` and :doc:`intro_patterns`. Having groups named after things like *webservers* and *dbservers* is repeated in the examples because it's a very powerful concept. This allows playbooks to target machines based on role, as well as to assign role specific variables See :doc:`playbooks_roles`. Operating System and Distribution Variance ++++++++++++++++++++++++++++++++++++++++++ -When dealing with a parameter that is different between two different operating systems, the best way to handle this is +When dealing with a parameter that is different between two different operating systems, a great way to handle this is by using the group_by module. This makes a dynamic group of hosts matching certain criteria, even if that group is not defined in the inventory file:: @@ -305,20 +339,19 @@ This makes a dynamic group of hosts matching certain criteria, even if that grou --- # talk to all hosts just so we can learn about them - - hosts: all - tasks: - - group_by: key={{ ansible_distribution }} + - group_by: key=os_{{ ansible_distribution }} # now just on the CentOS hosts... - - hosts: CentOS + - hosts: os_CentOS gather_facts: False - tasks: - # tasks that only happen on CentOS go here +This will throw all systems into a dynamic group based on the operating system name. + If group-specific settings are needed, this can also be done.
For example:: asdf: 10 --- - # file: group_vars/CentOS + # file: group_vars/os_CentOS asdf: 42 In the above example, CentOS machines get the value of '42' for asdf, but other machines get '10'. +This can be used not only to set variables, but also to apply certain roles to only certain systems. + +Alternatively, if only variables are needed:: + + - hosts: all + tasks: + - include_vars: "os_{{ ansible_distribution }}.yml" + - debug: var=asdf + +This will pull in variables based on the OS name. .. _ship_modules_with_playbooks: Bundling Ansible Modules With Playbooks +++++++++++++++++++++++++++++++++++++++ -.. versionadded:: 0.5 - If a playbook has a "./library" directory relative to its YAML file, this directory can be used to add ansible modules that will -automatically be in the ansible module path. This is a great way to keep modules that go with a playbook together. +automatically be in the ansible module path. This is a great way to keep modules that go with a playbook together. This is shown +in the directory structure example at the start of this section. .. _whitespace: @@ -367,6 +409,8 @@ for you. For example, you will probably not need ``vars``, ``vars_files``, ``vars_prompt`` and ``--extra-vars`` all at once, while also using an external inventory file. +If something feels complicated, it probably is, and may be a good opportunity to simplify things. + .. _version_control: Version Control @@ -393,3 +437,4 @@ changed the rules that are automating your infrastructure. Complete playbook files from the github project source `Mailing List `_ Questions? Help? Ideas? Stop by the list on Google Groups + diff --git a/docsite/rst/playbooks_conditionals.rst b/docsite/rst/playbooks_conditionals.rst index a00ec916c41..d71a0d3c7a0 100644 --- a/docsite/rst/playbooks_conditionals.rst +++ b/docsite/rst/playbooks_conditionals.rst @@ -166,11 +166,11 @@ To use this conditional import feature, you'll need facter or ohai installed pri you can of course push this out with Ansible if you like:: # for facter - ansible -m yum -a "pkg=facter ensure=installed" - ansible -m yum -a "pkg=ruby-json ensure=installed" + ansible -m yum -a "pkg=facter state=present" + ansible -m yum -a "pkg=ruby-json state=present" # for ohai - ansible -m yum -a "pkg=ohai ensure=installed" + ansible -m yum -a "pkg=ohai state=present" Ansible's approach to configuration -- separating variables from tasks, keeps your playbooks from turning into arbitrary code with ugly nested ifs, conditionals, and so on - and results diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst index e4640afbfa8..8f672791add 100644 --- a/docsite/rst/playbooks_delegation.rst +++ b/docsite/rst/playbooks_delegation.rst @@ -161,7 +161,7 @@ This can be optionally paired with "delegate_to" to specify an individual host t When "run_once" is not used with "delegate_to" it will execute on the first host, as defined by inventory, in the group(s) of hosts targeted by the play. e.g. webservers[0] if the play targeted "hosts: webservers". -This aproach is similar, although more concise and cleaner than applying a conditional to a task such as:: +This approach is similar, although more concise and cleaner than applying a conditional to a task such as:: - command: /opt/application/upgrade_db.py when: inventory_hostname == webservers[0] @@ -175,7 +175,7 @@ It may be useful to use a playbook locally, rather than by connecting over SSH. for assuring the configuration of a system by putting a playbook on a crontab. 
This may also be used to run a playbook inside an OS installer, such as an Anaconda kickstart. -To run an entire playbook locally, just set the "hosts:" line to "hosts:127.0.0.1" and then run the playbook like so:: +To run an entire playbook locally, just set the "hosts:" line to "hosts: 127.0.0.1" and then run the playbook like so:: ansible-playbook playbook.yml --connection=local diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index 70db3f7fe27..ecf8d46de1e 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -151,8 +151,8 @@ Just `Control-C` to kill it and run it again with `-K`. These are deleted immediately after the command is executed. This only occurs when sudoing from a user like 'bob' to 'timmy', not when going from 'bob' to 'root', or logging in directly as 'bob' or - 'root'. If this concerns you that this data is briefly readable - (not writable), avoid transferring uncrypted passwords with + 'root'. If it concerns you that this data is briefly readable + (not writable), avoid transferring unencrypted passwords with `sudo_user` set. In other cases, '/tmp' is not used and this does not come into play. Ansible also takes care to not log password parameters. @@ -196,7 +196,7 @@ it is recommended that you use the more conventional "module: options" format. This recommended format is used throughout the documentation, but you may encounter the older format in some playbooks. -Here is what a basic task looks like, as with most modules, +Here is what a basic task looks like. As with most modules, the service module takes key=value arguments:: tasks: diff --git a/docsite/rst/playbooks_lookups.rst b/docsite/rst/playbooks_lookups.rst index 3eb5ebb35cc..f33e21a3c5b 100644 --- a/docsite/rst/playbooks_lookups.rst +++ b/docsite/rst/playbooks_lookups.rst @@ -121,10 +121,17 @@ Here are some examples:: - debug: msg="{{ lookup('redis_kv', 'redis://localhost:6379,somekey') }} is value in Redis for somekey" + # dnstxt lookup requires the Python dnspython package - debug: msg="{{ lookup('dnstxt', 'example.com') }} is a DNS TXT record for example.com" - debug: msg="{{ lookup('template', './some_template.j2') }} is a value from evaluation of this template" + - debug: msg="{{ lookup('etcd', 'foo') }} is a value from a locally running etcd" + + - debug: msg="{{item}}" + with_url: + - 'http://github.com/gremlin.keys' + As an alternative you can also assign lookup plugins to variables or use them elsewhere. 
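For example (a minimal sketch; ``motd_value`` is an illustrative variable name)::

    vars:
      motd_value: "{{ lookup('file', '/etc/motd') }}"

    tasks:
      - debug: msg="motd value is {{ motd_value }}"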
These macros are evaluated each time they are used in a task (or template):: diff --git a/docsite/rst/playbooks_prompts.rst b/docsite/rst/playbooks_prompts.rst index c20e59e0791..29fc218fe86 100644 --- a/docsite/rst/playbooks_prompts.rst +++ b/docsite/rst/playbooks_prompts.rst @@ -55,7 +55,7 @@ entered value so you can use it, for instance, with the user module to define a - name: "my_password2" prompt: "Enter password2" private: yes - encrypt: "md5_crypt" + encrypt: "sha512_crypt" confirm: yes salt_size: 7 diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index eb7b812ea71..3ffabe835d3 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -61,19 +61,19 @@ For instance, if deploying multiple wordpress instances, I could contain all of my wordpress tasks in a single wordpress.yml file, and use it like so:: tasks: - - include: wordpress.yml user=timmy - - include: wordpress.yml user=alice - - include: wordpress.yml user=bob + - include: wordpress.yml wp_user=timmy + - include: wordpress.yml wp_user=alice + - include: wordpress.yml wp_user=bob If you are running Ansible 1.4 and later, include syntax is streamlined to match roles, and also allows passing list and dictionary parameters:: tasks: - - { include: wordpress.yml, user: timmy, ssh_keys: [ 'keys/one.txt', 'keys/two.txt' ] } + - { include: wordpress.yml, wp_user: timmy, ssh_keys: [ 'keys/one.txt', 'keys/two.txt' ] } Using either syntax, variables passed in can then be used in the included files. We'll cover them in :doc:`playbooks_variables`. You can reference them like this:: - {{ user }} + {{ wp_user }} (In addition to the explicitly passed-in parameters, all variables from the vars section are also available for use here as well.) @@ -85,7 +85,7 @@ which also supports structured variables:: - include: wordpress.yml vars: - remote_user: timmy + wp_user: timmy some_list_variable: - alpha - beta @@ -153,7 +153,7 @@ Roles .. versionadded:: 1.2 -Now that you have learned about vars_files, tasks, and handlers, what is the best way to organize your playbooks? +Now that you have learned about tasks and handlers, what is the best way to organize your playbooks? The short answer is to use roles! Roles are ways of automatically loading certain vars_files, tasks, and handlers based on a known file structure. Grouping content by roles also allows easy sharing of roles with other users. @@ -172,6 +172,7 @@ Example project structure:: tasks/ handlers/ vars/ + defaults/ meta/ webservers/ files/ @@ -179,6 +180,7 @@ Example project structure:: tasks/ handlers/ vars/ + defaults/ meta/ In a playbook, it would look like this:: diff --git a/docsite/rst/playbooks_special_topics.rst b/docsite/rst/playbooks_special_topics.rst index 078b27f2533..c57f5796c96 100644 --- a/docsite/rst/playbooks_special_topics.rst +++ b/docsite/rst/playbooks_special_topics.rst @@ -17,3 +17,4 @@ and adopt these only if they seem relevant or useful to your environment. playbooks_prompts playbooks_tags playbooks_vault + playbooks_startnstep diff --git a/docsite/rst/playbooks_startnstep.rst b/docsite/rst/playbooks_startnstep.rst new file mode 100644 index 00000000000..1067c3e1214 --- /dev/null +++ b/docsite/rst/playbooks_startnstep.rst @@ -0,0 +1,34 @@ +Start and Step +====================== + +This shows a few alternative ways to run playbooks. These modes are very useful for testing new plays or debugging. + +
+.. _start_at_task: + +Start-at-task +````````````` +If you want to start executing your playbook at a particular task, you can do so with the ``--start-at-task`` option:: + + ansible-playbook playbook.yml --start-at-task="install packages" + +The above will start executing your playbook at a task named "install packages". + + +.. _step: + +Step +```` + +Playbooks can also be executed interactively with ``--step``:: + + ansible-playbook playbook.yml --step + +This will cause ansible to stop on each task, and ask if it should execute that task. +Say you had a task called "configure ssh", the playbook run will stop and ask:: + + Perform task: configure ssh (y/n/c): + +Answering "y" will execute the task, answering "n" will skip the task, and answering "c" +will continue executing all the remaining tasks without asking. diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index e20e1d6dbe0..39a9f4bfa5f 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -297,18 +297,100 @@ Get a random number from 1 to 100 but in steps of 10:: {{ 100 |random(start=1, step=10) }} => 51 +Shuffle Filter +-------------- + +.. versionadded:: 1.8 + +This filter will randomize an existing list, giving a different order every invocation. + +To get a random list from an existing list:: + + {{ ['a','b','c']|shuffle }} => ['c','a','b'] + {{ ['a','b','c']|shuffle }} => ['b','c','a'] + +Note that when used with a non-'listable' item it is a no-op; otherwise it always returns a list. + + +.. _math_stuff: + +Math +-------------------- +.. versionadded:: 1.9 + + +To check whether something is 'NaN' (not a number):: + + {{ myvar | isnan }} + +Get the logarithm (default is e):: + + {{ myvar | log }} + +Get the base 10 logarithm:: + + {{ myvar | log(10) }} + +Give me the power of 2! (or 5):: + + {{ myvar | pow(2) }} + {{ myvar | pow(5) }} + +Square root, or the 5th root:: + + {{ myvar | root }} + {{ myvar | root(5) }} + +Note that jinja2 already provides some math functions, like abs() and round(). + + +.. _hash_filters: + +Hashing filters +-------------------- +.. versionadded:: 1.9 + +To get the sha1 hash of a string:: + + {{ 'test1'|hash('sha1') }} + +To get the md5 hash of a string:: + + {{ 'test1'|hash('md5') }} + +Get a string checksum:: + + {{ 'test2'|checksum }} + +Other hashes (platform dependent):: + + {{ 'test2'|hash('blowfish') }} + +To get a sha512 password hash (random salt):: + + {{ 'passwordsaresecret'|password_hash('sha512') }} + +To get a sha256 password hash with a specific salt:: + + {{ 'secretpassword'|password_hash('sha256', 'mysecretsalt') }} + + +Hash types available depend on the master system running ansible; +'hash' depends on hashlib, 'password_hash' depends on crypt. + +
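+A common use is feeding a password hash to the user module, e.g. (a minimal
+sketch; the account name and password here are purely illustrative)::
+
+    - name: create an account with a hashed password
+      user: name=deploy password={{ 'examplepassword' | password_hash('sha512') }}
+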
.. _other_useful_filters: Other Useful Filters -------------------- To concatenate a list into a string:: - + {{ list | join(" ") }} To get the last name of a file path, like 'foo.txt' out of '/etc/asdf/foo.txt':: - {{ path | basename }} + {{ path | basename }} To get the directory from a path:: @@ -318,14 +400,18 @@ To expand a path containing a tilde (`~`) character (new in version 1.5):: {{ path | expanduser }} +To get the real path of a link (new in version 1.8):: + + {{ path | readlink }} + To work with Base64 encoded strings:: {{ encoded | b64decode }} {{ decoded | b64encode }} -To take an md5sum of a filename:: +To create a UUID from a string (new in version 1.9):: - {{ filename | md5 }} + {{ hostname | to_uuid }} To cast values as certain types, such as when you input a string as "True" from a vars_prompt and the system doesn't know it is a boolean value:: @@ -355,6 +441,9 @@ To replace text in a string with regex, use the "regex_replace" filter:: # convert "foobar" to "bar" {{ 'foobar' | regex_replace('^f.*o(.*)$', '\\1') }} +.. note:: If "regex_replace" filter is used with variables inside YAML arguments (as opposed to simpler 'key=value' arguments), + then you need to escape backreferences (e.g. ``\\1``) with 4 backslashes (``\\\\``) instead of 2 (``\\``). + A few useful filters are typically added with each new Ansible release. The development documentation shows how to extend Ansible filters by writing your own as plugins, though in general, we encourage new ones to be added to core so everyone can make use of them. @@ -685,7 +774,7 @@ And you will see the following fact added:: "ansible_local": { "preferences": { "general": { - "asdf" : "1", + "asdf" : "1", "bar" : "2" } } @@ -703,7 +792,7 @@ can allow that fact to be used during that particular play. Otherwise, it will Here is an example of what that might look like:: - hosts: webservers - tasks: + tasks: - name: create directory for ansible custom facts file: state=directory recurse=yes path=/etc/ansible/facts.d - name: install custom ipmi fact @@ -740,10 +829,14 @@ the fact that they have not been communicated with in the current execution of / To configure fact caching, enable it in ansible.cfg as follows:: [defaults] + gathering = smart fact_caching = redis - fact_caching_timeout = 86400 # seconds + fact_caching_timeout = 86400 + # seconds -At the time of writing, Redis is the only supported fact caching engine. +You might also want to change the 'gathering' setting to 'smart' or 'explicit' or set gather_facts to False in most plays. + +At the time of writing, Redis is the only supported fact caching engine. To get redis up and running, perform the equivalent OS commands:: yum install redis @@ -838,6 +931,7 @@ A frequently used idiom is walking a group to find all IP addresses in that grou {% endfor %} An example of this could include pointing a frontend proxy server to all of the app servers, setting up the correct firewall rules between servers, etc. +You need to make sure that the facts of those hosts have been populated beforehand, though, for example by running a play against them if the facts have not been cached recently (fact caching was added in Ansible 1.8). Additionally, *inventory_hostname* is the name of the hostname as configured in Ansible's inventory host file. This can be useful for when you don't want to rely on the discovered hostname `ansible_hostname` or for other mysterious
*play_hosts* is available as a list of hostnames that are in scope for the current play. This may be useful for filling out templates with multiple hostnames or for injecting the list into the rules for a load balancer. +*delegate_to* is the inventory hostname of the host that the current task has been delegated to using 'delegate_to'. + Don't worry about any of this unless you think you need it. You'll know when you do. Also available, *inventory_dir* is the pathname of the directory holding Ansible's inventory host file, *inventory_file* is the pathname and the filename pointing to the Ansible's inventory host file. @@ -889,7 +985,7 @@ The contents of each variables file is a simple YAML dictionary, like this:: .. note:: It's also possible to keep per-host and per-group variables in very - similar files, this is covered in :doc:`intro_patterns`. + similar files, this is covered in :ref:`splitting_out_vars`. .. _passing_variables_on_the_command_line: @@ -948,9 +1044,10 @@ a use for it. If multiple variables of the same name are defined in different places, they win in a certain order, which is:: - * -e variables always win - * then comes "most everything else" - * then comes variables defined in inventory + * extra vars (-e in the command line) always win + * then comes connection variables defined in inventory (ansible_ssh_user, etc) + * then comes "most everything else" (command line switches, vars in play, included vars, role vars, etc) + * then comes the rest of the variables defined in inventory * then comes facts discovered about a system * then "role defaults", which are the most "defaulty" and lose in priority to everything. diff --git a/docsite/rst/quickstart.rst b/docsite/rst/quickstart.rst index 3d2eaca94f0..161748d9f02 100644 --- a/docsite/rst/quickstart.rst +++ b/docsite/rst/quickstart.rst @@ -3,7 +3,7 @@ Quickstart Video We've recorded a short video that shows how to get started with Ansible that you may like to use alongside the documentation. -The `quickstart video `_ is about 20 minutes long and will show you some of the basics about your +The `quickstart video `_ is about 30 minutes long and will show you some of the basics about your first steps with Ansible. Enjoy, and be sure to visit the rest of the documentation to learn more. diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 5147cc01e31..67aa039608e 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -11,8 +11,8 @@ # some basic default values... -hostfile = /etc/ansible/hosts -library = /usr/share/ansible +inventory = /etc/ansible/hosts +#library = /usr/share/my_modules/ remote_tmp = $HOME/.ansible/tmp pattern = * forks = 5 @@ -21,7 +21,7 @@ sudo_user = root #ask_sudo_pass = True #ask_pass = True transport = smart -remote_port = 22 +#remote_port = 22 module_lang = C # plays will gather facts by default, which contain information about @@ -147,13 +147,18 @@ filter_plugins = /usr/share/ansible_plugins/filter_plugins # avoid issues. #http_user_agent = ansible-agent -# if set to a persistant type (not 'memory', for example 'redis') fact values +# if set to a persistent type (not 'memory', for example 'redis') fact values # from previous runs in Ansible will be stored. This may be useful when # wanting to use, for example, IP information from one group of servers # without having to talk to them in the same playbook run to get their # current IP information. 
fact_caching = memory + +# retry files +#retry_files_enabled = False +#retry_files_save_path = ~/.ansible-retry + [paramiko_connection] # uncomment this line to cause the paramiko connection plugin to not record new host diff --git a/examples/scripts/ConfigureRemotingForAnsible.ps1 b/examples/scripts/ConfigureRemotingForAnsible.ps1 index 1cb3375725b..1b45ce442bf 100644 --- a/examples/scripts/ConfigureRemotingForAnsible.ps1 +++ b/examples/scripts/ConfigureRemotingForAnsible.ps1 @@ -1,11 +1,18 @@ -# Script to set a windows computer up for remoting -# The script checks the current WinRM/Remoting configuration and makes the necessary changes -# set $VerbosePreference="Continue" before running the script in order to see the output of the script +# Configure a Windows host for remote management with Ansible +# ----------------------------------------------------------- +# +# This script checks the current WinRM/PSRemoting configuration and makes the +# necessary changes to allow Ansible to connect, authenticate and execute +# PowerShell commands. +# +# Set $VerbosePreference = "Continue" before running the script in order to +# see the output messages. # # Written by Trond Hindenes +# Updated by Chris Church # # Version 1.0 - July 6th, 2014 - +# Version 1.1 - November 11th, 2014 Param ( [string]$SubjectName = $env:COMPUTERNAME, @@ -14,7 +21,6 @@ Param ( ) -#region function defs Function New-LegacySelfSignedCert { Param ( @@ -22,10 +28,10 @@ Function New-LegacySelfSignedCert [int]$ValidDays = 365 ) - $name = new-object -com "X509Enrollment.CX500DistinguishedName.1" + $name = New-Object -COM "X509Enrollment.CX500DistinguishedName.1" $name.Encode("CN=$SubjectName", 0) - $key = new-object -com "X509Enrollment.CX509PrivateKey.1" + $key = New-Object -COM "X509Enrollment.CX509PrivateKey.1" $key.ProviderName = "Microsoft RSA SChannel Cryptographic Provider" $key.KeySpec = 1 $key.Length = 1024 @@ -33,149 +39,160 @@ Function New-LegacySelfSignedCert $key.MachineContext = 1 $key.Create() - $serverauthoid = new-object -com "X509Enrollment.CObjectId.1" + $serverauthoid = New-Object -COM "X509Enrollment.CObjectId.1" $serverauthoid.InitializeFromValue("1.3.6.1.5.5.7.3.1") - $ekuoids = new-object -com "X509Enrollment.CObjectIds.1" - $ekuoids.add($serverauthoid) - $ekuext = new-object -com "X509Enrollment.CX509ExtensionEnhancedKeyUsage.1" + $ekuoids = New-Object -COM "X509Enrollment.CObjectIds.1" + $ekuoids.Add($serverauthoid) + $ekuext = New-Object -COM "X509Enrollment.CX509ExtensionEnhancedKeyUsage.1" $ekuext.InitializeEncode($ekuoids) - $cert = new-object -com "X509Enrollment.CX509CertificateRequestCertificate.1" + $cert = New-Object -COM "X509Enrollment.CX509CertificateRequestCertificate.1" $cert.InitializeFromPrivateKey(2, $key, "") $cert.Subject = $name $cert.Issuer = $cert.Subject - $cert.NotBefore = (get-date).addDays(-1) + $cert.NotBefore = (Get-Date).AddDays(-1) $cert.NotAfter = $cert.NotBefore.AddDays($ValidDays) $cert.X509Extensions.Add($ekuext) $cert.Encode() - $enrollment = new-object -com "X509Enrollment.CX509Enrollment.1" + $enrollment = New-Object -COM "X509Enrollment.CX509Enrollment.1" $enrollment.InitializeFromRequest($cert) $certdata = $enrollment.CreateRequest(0) $enrollment.InstallResponse(2, $certdata, 0, "") - #return the thumprint of the last installed cert - ls "Cert:\LocalMachine\my"| Sort-Object notbefore -Descending | select -First 1 | select -expand Thumbprint + # Return the thumbprint of the last installed cert. 
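+    # (This sorts the LocalMachine\My store by NotBefore, newest first, and
+    # assumes the first entry is the certificate that was just installed.)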
+ Get-ChildItem "Cert:\LocalMachine\my"| Sort-Object NotBefore -Descending | Select -First 1 | Select -Expand Thumbprint } -#endregion -#Start script +# Setup error handling. +Trap +{ + $_ + Exit 1 +} $ErrorActionPreference = "Stop" -#Detect PowerShell version -if ($PSVersionTable.PSVersion.Major -lt 3) + +# Detect PowerShell version. +If ($PSVersionTable.PSVersion.Major -lt 3) { - Write-Error "PowerShell/Windows Management Framework needs to be updated to 3 or higher. Stopping script" + Throw "PowerShell version 3 or higher is required." } -#Detect OS - $Win32_OS = Get-WmiObject Win32_OperatingSystem - switch ($Win32_OS.Version) - { - "6.2.9200" {$OSVersion = "Windows Server 2012"} - "6.1.7601" {$OSVersion = "Windows Server 2008R2"} - } - - - #Set up remoting - Write-verbose "Verifying WS-MAN" - if (!(get-service "WinRM")) - { - Write-Error "I couldnt find the winRM service on this computer. Stopping" - } - Elseif ((get-service "WinRM").Status -ne "Running") - { - Write-Verbose "Starting WinRM" +# Find and start the WinRM service. +Write-Verbose "Verifying WinRM service." +If (!(Get-Service "WinRM")) +{ + Throw "Unable to find the WinRM service." +} +ElseIf ((Get-Service "WinRM").Status -ne "Running") +{ + Write-Verbose "Starting WinRM service." Start-Service -Name "WinRM" -ErrorAction Stop - } - - #At this point, winrm should be running - #Check that we have a ps session config - if (!(Get-PSSessionConfiguration -verbose:$false) -or (!(get-childitem WSMan:\localhost\Listener))) - { - Write-Verbose "PS remoting is not enabled. Activating" - try - { - Enable-PSRemoting -Force -ErrorAction SilentlyContinue - } - catch{} - } - Else - { - Write-Verbose "PS remoting is already active and running" - } - - #At this point, test a remoting connection to localhost, which should work - $result = invoke-command -ComputerName localhost -ScriptBlock {$env:computername} -ErrorVariable localremotingerror -ErrorAction SilentlyContinue - - $options = New-PSSessionOption -SkipCACheck -SkipCNCheck -SkipRevocationCheck - $resultssl = New-PSSession -UseSSL -ComputerName "localhost" -SessionOption $options -ErrorVariable localremotingsslerror -ErrorAction SilentlyContinue +} - if (!$result -and $resultssl) - { - Write-Verbose "HTTP-based sessions not enabled, HTTPS based sessions enabled" - } - ElseIf (!$result -and !$resultssl) - { - Write-error "Could not establish session on either HTTP or HTTPS. Breaking" - } +# WinRM should be running; check that we have a PS session config. +If (!(Get-PSSessionConfiguration -Verbose:$false) -or (!(Get-ChildItem WSMan:\localhost\Listener))) +{ + Write-Verbose "Enabling PS Remoting." + Enable-PSRemoting -Force -ErrorAction Stop +} +Else +{ + Write-Verbose "PS Remoting is already enabled." +} - #at this point, make sure there is a SSL-based listener - $listeners = dir WSMan:\localhost\Listener +# Test a remoting connection to localhost, which should work. +$httpResult = Invoke-Command -ComputerName "localhost" -ScriptBlock {$env:COMPUTERNAME} -ErrorVariable httpError -ErrorAction SilentlyContinue +$httpsOptions = New-PSSessionOption -SkipCACheck -SkipCNCheck -SkipRevocationCheck - if (!($listeners | where {$_.Keys -like "TRANSPORT=HTTPS"})) - { - #HTTPS-based endpoint does not exist. 
- if (($CreateSelfSignedCert) -and ($OSVersion -notmatch "2012")) - { - $thumprint = New-LegacySelfSignedCert -SubjectName $env:COMPUTERNAME - } - if (($CreateSelfSignedCert) -and ($OSVersion -match "2012")) +$httpsResult = New-PSSession -UseSSL -ComputerName "localhost" -SessionOption $httpsOptions -ErrorVariable httpsError -ErrorAction SilentlyContinue + +If ($httpResult -and $httpsResult) +{ + Write-Verbose "HTTP and HTTPS sessions are enabled." +} +ElseIf ($httpsResult -and !$httpResult) +{ + Write-Verbose "HTTP sessions are disabled, HTTPS sessions are enabled." +} +ElseIf ($httpResult -and !$httpsResult) +{ + Write-Verbose "HTTPS sessions are disabled, HTTP sessions are enabled." +} +Else +{ + Throw "Unable to establish an HTTP or HTTPS remoting session." +} + + +# Make sure there is an SSL listener. +$listeners = Get-ChildItem WSMan:\localhost\Listener +If (!($listeners | Where {$_.Keys -like "TRANSPORT=HTTPS"})) +{ + # HTTPS-based endpoint does not exist. + If (Get-Command "New-SelfSignedCertificate" -ErrorAction SilentlyContinue) { $cert = New-SelfSignedCertificate -DnsName $env:COMPUTERNAME -CertStoreLocation "Cert:\LocalMachine\My" - $thumprint = $cert.Thumbprint + $thumbprint = $cert.Thumbprint } - - - + Else + { + $thumbprint = New-LegacySelfSignedCert -SubjectName $env:COMPUTERNAME + } + # Create the hashtables of settings to be used. $valueset = @{} - $valueset.add('Hostname',$env:COMPUTERNAME) - $valueset.add('CertificateThumbprint',$thumprint) + $valueset.Add('Hostname', $env:COMPUTERNAME) + $valueset.Add('CertificateThumbprint', $thumbprint) $selectorset = @{} - $selectorset.add('Transport','HTTPS') - $selectorset.add('Address','*') + $selectorset.Add('Transport', 'HTTPS') + $selectorset.Add('Address', '*') - Write-Verbose "Enabling SSL-based remoting" - New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset - } - Else - { - Write-Verbose "SSL-based remoting already active" - } + Write-Verbose "Enabling SSL listener." + New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset +} +Else +{ + Write-Verbose "SSL listener is already active." +} - #Check for basic authentication - $basicauthsetting = Get-ChildItem WSMan:\localhost\Service\Auth | where {$_.Name -eq "Basic"} - - if (($basicauthsetting.Value) -eq $false) - { - Write-Verbose "Enabling basic auth" +# Check for basic authentication. +$basicAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where {$_.Name -eq "Basic"} +If (($basicAuthSetting.Value) -eq $false) +{ + Write-Verbose "Enabling basic auth support." Set-Item -Path "WSMan:\localhost\Service\Auth\Basic" -Value $true - } - Else - { - Write-verbose "basic auth already enabled" - } - -#FIrewall -netsh advfirewall firewall add rule Profile=public name="Allow WinRM HTTPS" dir=in localport=5986 protocol=TCP action=allow +} +Else +{ + Write-Verbose "Basic auth is already enabled." +} # Configure firewall to allow WinRM HTTPS connections. +$fwtest1 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS" +$fwtest2 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS" profile=any +If ($fwtest1.count -lt 5) +{ + Write-Verbose "Adding firewall rule to allow WinRM HTTPS." + netsh advfirewall firewall add rule profile=any name="Allow WinRM HTTPS" dir=in localport=5986 protocol=TCP action=allow +} +ElseIf (($fwtest1.count -ge 5) -and ($fwtest2.count -lt 5)) +{ + Write-Verbose "Updating firewall rule to allow WinRM HTTPS for any profile."
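+    # ('set rule ... new profile=any' rewrites the existing rule's profile in
+    # place rather than creating a duplicate rule.)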
+ netsh advfirewall firewall set rule name="Allow WinRM HTTPS" new profile=any +} +Else +{ + Write-Verbose "Firewall rule already exists to allow WinRM HTTPS." +} - Write-Verbose "PS Remoting successfully setup for Ansible" + +Write-Verbose "PS Remoting has been successfully configured for Ansible." diff --git a/examples/scripts/upgrade_to_ps3.ps1 b/examples/scripts/upgrade_to_ps3.ps1 index 693088b75f8..c9f55267e48 100644 --- a/examples/scripts/upgrade_to_ps3.ps1 +++ b/examples/scripts/upgrade_to_ps3.ps1 @@ -62,13 +62,24 @@ if ([Environment]::OSVersion.Version.Major -gt 6) $osminor = [environment]::OSVersion.Version.Minor +$architecture = $ENV:PROCESSOR_ARCHITECTURE + +if ($architecture -eq "AMD64") +{ + $architecture = "x64" +} +else +{ + $architecture = "x86" +} + if ($osminor -eq 1) { - $DownloadUrl = "http://download.microsoft.com/download/E/7/6/E76850B8-DA6E-4FF5-8CCE-A24FC513FD16/Windows6.1-KB2506143-x64.msu" + $DownloadUrl = "http://download.microsoft.com/download/E/7/6/E76850B8-DA6E-4FF5-8CCE-A24FC513FD16/Windows6.1-KB2506143-" + $architecture + ".msu" } elseif ($osminor -eq 0) { - $DownloadUrl = "http://download.microsoft.com/download/E/7/6/E76850B8-DA6E-4FF5-8CCE-A24FC513FD16/Windows6.0-KB2506146-x64.msu" + $DownloadUrl = "http://download.microsoft.com/download/E/7/6/E76850B8-DA6E-4FF5-8CCE-A24FC513FD16/Windows6.0-KB2506146-" + $architecture + ".msu" } else { diff --git a/hacking/env-setup b/hacking/env-setup old mode 100755 new mode 100644 index 6e4de1af72b..9b9a529d13a --- a/hacking/env-setup +++ b/hacking/env-setup @@ -1,45 +1,78 @@ -#!/bin/bash -# usage: source ./hacking/env-setup [-q] +# usage: source hacking/env-setup [-q] # modifies environment for running Ansible from checkout +# Default values for shell variables we use +PYTHONPATH=${PYTHONPATH-""} +PATH=${PATH-""} +MANPATH=${MANPATH-""} +verbosity=${1-info} # Defaults to `info' if unspecified + +if [ "$verbosity" = -q ]; then + verbosity=silent +fi + # When run using source as directed, $0 gets set to bash, so we must use $BASH_SOURCE if [ -n "$BASH_SOURCE" ] ; then - HACKING_DIR=`dirname $BASH_SOURCE` -elif [ $(basename $0) = "env-setup" ]; then - HACKING_DIR=`dirname $0` + HACKING_DIR=$(dirname "$BASH_SOURCE") +elif [ $(basename -- "$0") = "env-setup" ]; then + HACKING_DIR=$(dirname "$0") +elif [ -n "$KSH_VERSION" ]; then + HACKING_DIR=$(dirname "${.sh.file}") else HACKING_DIR="$PWD/hacking" fi # The below is an alternative to readlink -fn which doesn't exist on OS X # Source: http://stackoverflow.com/a/1678636 -FULL_PATH=`python -c "import os; print(os.path.realpath('$HACKING_DIR'))"` -ANSIBLE_HOME=`dirname "$FULL_PATH"` +FULL_PATH=$(python -c "import os; print(os.path.realpath('$HACKING_DIR'))") +ANSIBLE_HOME=$(dirname "$FULL_PATH") PREFIX_PYTHONPATH="$ANSIBLE_HOME/lib" PREFIX_PATH="$ANSIBLE_HOME/bin" PREFIX_MANPATH="$ANSIBLE_HOME/docs/man" -[[ $PYTHONPATH != ${PREFIX_PYTHONPATH}* ]] && export PYTHONPATH=$PREFIX_PYTHONPATH:$PYTHONPATH -[[ $PATH != ${PREFIX_PATH}* ]] && export PATH=$PREFIX_PATH:$PATH -unset ANSIBLE_LIBRARY -export ANSIBLE_LIBRARY="$ANSIBLE_HOME/library:`python $HACKING_DIR/get_library.py`" -[[ $MANPATH != ${PREFIX_MANPATH}* ]] && export MANPATH=$PREFIX_MANPATH:$MANPATH +expr "$PYTHONPATH" : "${PREFIX_PYTHONPATH}.*" > /dev/null || export PYTHONPATH="$PREFIX_PYTHONPATH:$PYTHONPATH" +expr "$PATH" : "${PREFIX_PATH}.*" > /dev/null || export PATH="$PREFIX_PATH:$PATH" +expr "$MANPATH" : "${PREFIX_MANPATH}.*" > /dev/null || export MANPATH="$PREFIX_MANPATH:$MANPATH" -# Print out values unless -q is set +# +# 
Generate egg_info so that pkg_resources works +# -if [ $# -eq 0 -o "$1" != "-q" ] ; then - echo "" - echo "Setting up Ansible to run out of checkout..." - echo "" - echo "PATH=$PATH" - echo "PYTHONPATH=$PYTHONPATH" - echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY" - echo "MANPATH=$MANPATH" - echo "" - - echo "Remember, you may wish to specify your host file with -i" - echo "" - echo "Done!" - echo "" +# Do the work in a function so we don't repeat ourselves later +gen_egg_info() +{ + python setup.py egg_info + if [ -e "$PREFIX_PYTHONPATH/ansible.egg-info" ] ; then + rm -r "$PREFIX_PYTHONPATH/ansible.egg-info" + fi + mv "ansible.egg-info" "$PREFIX_PYTHONPATH" +} + +if [ "$ANSIBLE_HOME" != "$PWD" ] ; then + current_dir="$PWD" +else + current_dir="$ANSIBLE_HOME" fi +cd "$ANSIBLE_HOME" +if [ "$verbosity" = silent ] ; then + gen_egg_info > /dev/null 2>&1 +else + gen_egg_info +fi +cd "$current_dir" +if [ "$verbosity" != silent ] ; then + cat <<- EOF + + Setting up Ansible to run out of checkout... + + PATH=$PATH + PYTHONPATH=$PYTHONPATH + MANPATH=$MANPATH + + Remember, you may wish to specify your host file with -i + + Done! + + EOF +fi diff --git a/hacking/env-setup.fish b/hacking/env-setup.fish index 05fb60672d1..1b872f4dc03 100644 --- a/hacking/env-setup.fish +++ b/hacking/env-setup.fish @@ -36,6 +36,16 @@ end set -gx ANSIBLE_LIBRARY $ANSIBLE_HOME/library +# Generate egg_info so that pkg_resources works +pushd $ANSIBLE_HOME +python setup.py egg_info +if test -e $PREFIX_PYTHONPATH/ansible*.egg-info + rm -r $PREFIX_PYTHONPATH/ansible*.egg-info +end +mv ansible*egg-info $PREFIX_PYTHONPATH +popd + + if set -q argv switch $argv case '-q' '--quiet' diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index f74d09ad72a..0a7d1c884ca 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -1,5 +1,6 @@ #!/usr/bin/env python # (c) 2012, Jan-Piet Mens +# (c) 2012-2014, Michael DeHaan and others # # This file is part of Ansible # @@ -44,7 +45,7 @@ TO_OLD_TO_BE_NOTABLE = 1.0 # Get parent directory of the directory this script lives in MODULEDIR=os.path.abspath(os.path.join( - os.path.dirname(os.path.realpath(__file__)), os.pardir, 'library' + os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules' )) # The name of the DOCUMENTATION template @@ -58,6 +59,8 @@ _MODULE = re.compile(r"M\(([^)]+)\)") _URL = re.compile(r"U\(([^)]+)\)") _CONST = re.compile(r"C\(([^)]+)\)") +DEPRECATED = " (D)" +NOTCORE = " (E)" ##################################################################################### def rst_ify(text): @@ -106,7 +109,9 @@ def write_data(text, options, outputname, module): ''' dumps module output to a file or the screen, as requested ''' if options.output_dir is not None: - f = open(os.path.join(options.output_dir, outputname % module), 'w') + fname = os.path.join(options.output_dir, outputname % module) + fname = fname.replace(".py","") + f = open(fname, 'w') f.write(text.encode('utf-8')) f.close() else: @@ -114,28 +119,54 @@ def write_data(text, options, outputname, module): ##################################################################################### -def list_modules(module_dir): + +def list_modules(module_dir, depth=0): ''' returns a hash of categories, each category being a hash of module names to file paths ''' - categories = dict(all=dict()) - files = glob.glob("%s/*" % module_dir) - for d in files: - if os.path.isdir(d): - files2 = glob.glob("%s/*" % d) - for f in files2: + categories = dict(all=dict(),_aliases=dict()) + if 
depth <= 3: # limit # of subdirs - if f.endswith(".ps1"): + files = glob.glob("%s/*" % module_dir) + for d in files: + + category = os.path.splitext(os.path.basename(d))[0] + if os.path.isdir(d): + + res = list_modules(d, depth + 1) + for key in res.keys(): + if key in categories: + categories[key] = ansible.utils.merge_hash(categories[key], res[key]) + res.pop(key, None) + + if depth < 2: + categories.update(res) + else: + category = module_dir.split("/")[-1] + if not category in categories: + categories[category] = res + else: + categories[category].update(res) + else: + module = category + category = os.path.basename(module_dir) + if not d.endswith(".py") or d.endswith('__init__.py'): # windows powershell modules have documentation stubs in python docstring # format (they are not executed) so skip the ps1 format files continue + elif module.startswith("_") and os.path.islink(d): + source = os.path.splitext(os.path.basename(os.path.realpath(d)))[0] + module = module.replace("_","",1) + if not source in categories['_aliases']: + categories['_aliases'][source] = [module] + else: + categories['_aliases'][source].append(module) + continue - tokens = f.split("/") - module = tokens[-1] - category = tokens[-2] if not category in categories: categories[category] = {} - categories[category][module] = f - categories['all'][module] = f + categories[category][module] = d + categories['all'][module] = d + return categories ##################################################################################### @@ -184,25 +215,48 @@ def jinja2_environment(template_dir, typ): ##################################################################################### -def process_module(module, options, env, template, outputname, module_map): - - print "rendering: %s" % module +def process_module(module, options, env, template, outputname, module_map, aliases): fname = module_map[module] + if isinstance(fname, dict): + return "SKIPPED" + + basename = os.path.basename(fname) + deprecated = False # ignore files with extensions - if "." 
in os.path.basename(fname): + if not basename.endswith(".py"): return + elif module.startswith("_"): + if os.path.islink(fname): + return # ignore, its an alias + deprecated = True + module = module.replace("_","",1) + + print "rendering: %s" % module # use ansible core library to parse out doc metadata YAML and plaintext examples doc, examples = ansible.utils.module_docs.get_docstring(fname, verbose=options.verbose) # crash if module is missing documentation and not explicitly hidden from docs index - if doc is None and module not in ansible.utils.module_docs.BLACKLIST_MODULES: - sys.stderr.write("*** ERROR: CORE MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module)) - sys.exit(1) if doc is None: - return "SKIPPED" + if module in ansible.utils.module_docs.BLACKLIST_MODULES: + return "SKIPPED" + else: + sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module)) + sys.exit(1) + + if deprecated and 'deprecated' not in doc: + sys.stderr.write("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module)) + sys.exit(1) + + if "/core/" in fname: + doc['core'] = True + else: + doc['core'] = False + + if module in aliases: + doc['aliases'] = aliases[module] all_keys = [] @@ -226,9 +280,10 @@ def process_module(module, options, env, template, outputname, module_map): for (k,v) in doc['options'].iteritems(): all_keys.append(k) - all_keys = sorted(all_keys) - doc['option_keys'] = all_keys + all_keys = sorted(all_keys) + + doc['option_keys'] = all_keys doc['filename'] = fname doc['docuri'] = doc['module'].replace('_', '-') doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') @@ -239,13 +294,32 @@ def process_module(module, options, env, template, outputname, module_map): text = template.render(doc) write_data(text, options, outputname, module) + return doc['short_description'] ##################################################################################### +def print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases): + modstring = module + modname = module + if module in deprecated: + modstring = modstring + DEPRECATED + modname = "_" + module + elif module not in core: + modstring = modstring + NOTCORE + + result = process_module(modname, options, env, template, outputname, module_map, aliases) + + if result != "SKIPPED": + category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module)) + def process_category(category, categories, options, env, template, outputname): module_map = categories[category] + aliases = {} + if '_aliases' in categories: + aliases = categories['_aliases'] + category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category) category_file = open(category_file_path, "w") print "*** recording category %s in %s ***" % (category, category_file_path) @@ -255,7 +329,27 @@ def process_category(category, categories, options, env, template, outputname): category = category.replace("_"," ") category = category.title() - modules = module_map.keys() + modules = [] + deprecated = [] + core = [] + for module in module_map.keys(): + + if isinstance(module_map[module], dict): + for mod in module_map[module].keys(): + if mod.startswith("_"): + mod = mod.replace("_","",1) + deprecated.append(mod) + elif '/core/' in module_map[module][mod]: + core.append(mod) + else: + if module.startswith("_"): + module = module.replace("_","",1) + deprecated.append(module) + elif '/core/' in module_map[module]: + 
core.append(module) + + modules.append(module) + modules.sort() category_header = "%s Modules" % (category.title()) @@ -265,17 +359,34 @@ %s %s -.. toctree:: - :maxdepth: 1 +.. toctree:: :maxdepth: 1 """ % (category_header, underscores)) - + sections = [] for module in modules: - result = process_module(module, options, env, template, outputname, module_map) - if result != "SKIPPED": - category_file.write(" %s_module\n" % module) + if module in module_map and isinstance(module_map[module], dict): + sections.append(module) + continue + else: + print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases) + sections.sort() + for section in sections: + category_file.write("\n%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section))) + category_file.write(".. toctree:: :maxdepth: 1\n\n") + section_modules = module_map[section].keys() + section_modules.sort() + #for module in module_map[section]: + for module in section_modules: + print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map[section], aliases) + + category_file.write("""\n\n .. note:: + - %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale. + - %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less actively maintained than 'core' modules. + - Tickets filed on modules are filed to different repos than those on the main open source project. Core module tickets should be filed at `ansible/ansible-modules-core on GitHub `_, extras tickets to `ansible/ansible-modules-extras on GitHub `_ """ % (DEPRECATED, NOTCORE)) category_file.close() # TODO: end a new category file @@ -320,6 +431,8 @@ def main(): category_list_file.write(" :maxdepth: 1\n\n") for category in category_names: + if category.startswith("_"): + continue category_list_file.write(" list_of_%s_modules\n" % category) process_category(category, categories, options, env, template, outputname) diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index 54fbafefc1d..232d97a7312 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -21,6 +21,17 @@ # --------------------------------------------#} +{% if aliases is defined -%} +Aliases: @{ ','.join(aliases) }@ +{% endif %} + +{% if deprecated is defined -%} +DEPRECATED +---------- + +@{ deprecated }@ +{% endif %} + Synopsis -------- @@ -101,3 +112,42 @@ Examples {% endfor %} {% endif %} + +{% if not deprecated %} + {% if core %} + +This is a Core Module +--------------------- + +The source of this module is hosted on GitHub in the `ansible-modules-core `_ repo. + +If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be grateful if you would file one. + +Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. 
+ +Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. + +This is a "core" ansible module, which means it will receive slightly higher priority for all requests than those in the "extras" repos. + + {% else %} + +This is an Extras Module +------------------------ + +The source of this module is hosted on GitHub in the `ansible-modules-extras `_ repo. + +If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be grateful if you would file one. + +Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group ` or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. + +Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. + +Note that this module is designated an "extras" module. Non-core modules are still fully usable, but may receive slightly lower response rates for issues and pull requests. +Popular "extras" modules may be promoted to core modules over time. + + {% endif %} +{% endif %} + +For help in developing on modules, should you be so inclined, please read :doc:`community`, :doc:`developing_test_pr` and :doc:`developing_modules`. + + diff --git a/hacking/test-module b/hacking/test-module index b6fe1f5cdbe..c226f32e889 100755 --- a/hacking/test-module +++ b/hacking/test-module @@ -58,7 +58,7 @@ def parse(): parser.add_option('-D', '--debugger', dest='debugger', help="path to python debugger (e.g. /usr/bin/pdb)") parser.add_option('-I', '--interpreter', dest='interpreter', - help="path to interpeter to use for this module (e.g. ansible_python_interpreter=/usr/bin/python)", + help="path to interpreter to use for this module (e.g. ansible_python_interpreter=/usr/bin/python)", metavar='INTERPRETER_TYPE=INTERPRETER_PATH') parser.add_option('-c', '--check', dest='check', action='store_true', help="run the module in check mode") @@ -104,7 +104,7 @@ def boilerplate_module(modfile, args, interpreter, check): inject = {} if interpreter: if '=' not in interpreter: - print 'interpeter must by in the form of ansible_python_interpreter=/usr/bin/python' + print 'interpreter must be in the form of ansible_python_interpreter=/usr/bin/python' sys.exit(1) interpreter_type, interpreter_path = interpreter.split('=') if not interpreter_type.startswith('ansible_'): diff --git a/hacking/update.sh b/hacking/update.sh new file mode 100755 index 00000000000..5979dd0ab2b --- /dev/null +++ b/hacking/update.sh @@ -0,0 +1,3 @@ +#!/bin/sh +git pull --rebase +git submodule update --init --recursive diff --git a/legacy/gce_tests.py b/legacy/gce_tests.py deleted file mode 100644 index 3f0a4273b08..00000000000 --- a/legacy/gce_tests.py +++ /dev/null @@ -1,748 +0,0 @@ -#!/usr/bin/env python -# Copyright 2013 Google Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a custom functional test script for the Google Compute Engine -# ansible modules. In order to run these tests, you must: -# 1) Create a Google Cloud Platform account and enable the Google -# Compute Engine service and billing -# 2) Download, install, and configure 'gcutil' -# see [https://developers.google.com/compute/docs/gcutil/] -# 3) Convert your GCE Service Account private key from PKCS12 to PEM format -# $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \ -# > -nodes -nocerts | openssl rsa -out pkey.pem -# 4) Make sure you have libcloud 0.13.3 or later installed. -# 5) Make sure you have a libcloud 'secrets.py' file in your PYTHONPATH -# 6) Set GCE_PARAMS and GCE_KEYWORD_PARMS in your 'secrets.py' file. -# 7) Set up a simple hosts file -# $ echo 127.0.0.1 > ~/ansible_hosts -# $ echo "export ANSIBLE_HOSTS='~/ansible_hosts'" >> ~/.bashrc -# $ . ~/.bashrc -# 8) Set up your ansible 'hacking' environment -# $ cd ~/ansible -# $ . hacking/env-setup -# $ export ANSIBLE_HOST_KEY_CHECKING=no -# $ ansible all -m ping -# 9) Set your PROJECT variable below -# 10) Run and time the tests and log output, take ~30 minutes to run -# $ time stdbuf -oL python test/gce_tests.py 2>&1 | tee log -# -# Last update: gcutil-1.11.0 and v1beta16 - -# Set this to your test Project ID -PROJECT="google.com:erjohnso" - -# debugging -DEBUG=False # lots of debugging output -VERBOSE=True # on failure, display ansible command and expected/actual result - -# location - note that some tests rely on the module's 'default' -# region/zone, which should match the settings below. -REGION="us-central1" -ZONE="%s-a" % REGION - -# Peeking is a way to trigger looking at a specified set of resources -# before and/or after a test run. The 'test_cases' data structure below -# has a few tests with 'peek_before' and 'peek_after'. When those keys -# are set and PEEKING_ENABLED is True, then these steps will be executed -# to aid in debugging tests. Normally, this is not needed. 
-PEEKING_ENABLED=False - -# disks -DNAME="aaaaa-ansible-disk" -DNAME2="aaaaa-ansible-disk2" -DNAME6="aaaaa-ansible-inst6" -DNAME7="aaaaa-ansible-inst7" -USE_PD="true" -KERNEL="https://www.googleapis.com/compute/v1beta16/projects/google/global/kernels/gce-no-conn-track-v20130813" - -# instances -INAME="aaaaa-ansible-inst" -INAME2="aaaaa-ansible-inst2" -INAME3="aaaaa-ansible-inst3" -INAME4="aaaaa-ansible-inst4" -INAME5="aaaaa-ansible-inst5" -INAME6="aaaaa-ansible-inst6" -INAME7="aaaaa-ansible-inst7" -TYPE="n1-standard-1" -IMAGE="https://www.googleapis.com/compute/v1beta16/projects/debian-cloud/global/images/debian-7-wheezy-v20131014" -NETWORK="default" -SCOPES="https://www.googleapis.com/auth/userinfo.email,https://www.googleapis.com/auth/compute,https://www.googleapis.com/auth/devstorage.full_control" - -# networks / firewalls -NETWK1="ansible-network1" -NETWK2="ansible-network2" -NETWK3="ansible-network3" -CIDR1="10.240.16.0/24" -CIDR2="10.240.32.0/24" -CIDR3="10.240.64.0/24" -GW1="10.240.16.1" -GW2="10.240.32.1" -FW1="ansible-fwrule1" -FW2="ansible-fwrule2" -FW3="ansible-fwrule3" -FW4="ansible-fwrule4" - -# load-balancer tests -HC1="ansible-hc1" -HC2="ansible-hc2" -HC3="ansible-hc3" -LB1="ansible-lb1" -LB2="ansible-lb2" - -from commands import getstatusoutput as run -import sys - -test_cases = [ - {'id': '01', 'desc': 'Detach / Delete disk tests', - 'setup': ['gcutil addinstance "%s" --wait_until_running --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --persistent_boot_disk=%s' % (INAME, ZONE, TYPE, NETWORK, SCOPES, IMAGE, USE_PD), - 'gcutil adddisk "%s" --size_gb=2 --zone=%s --wait_until_complete' % (DNAME, ZONE)], - - 'tests': [ - {'desc': 'DETACH_ONLY but disk not found [success]', - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % ("missing-disk", INAME, ZONE), - 'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "%s", "name": "missing-disk", "state": "absent", "zone": "%s"}' % (INAME, ZONE), - }, - {'desc': 'DETACH_ONLY but instance not found [success]', - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % (DNAME, "missing-instance", ZONE), - 'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "missing-instance", "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (DNAME, ZONE), - }, - {'desc': 'DETACH_ONLY but neither disk nor instance exists [success]', - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % ("missing-disk", "missing-instance", ZONE), - 'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "missing-instance", "name": "missing-disk", "state": "absent", "zone": "%s"}' % (ZONE), - }, - {'desc': 'DETACH_ONLY but disk is not currently attached [success]', - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % (DNAME, INAME, ZONE), - 'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "%s", "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (INAME, DNAME, ZONE), - }, - {'desc': 'DETACH_ONLY disk is attached and should be detached [success]', - 'setup': ['gcutil attachdisk --disk="%s,mode=READ_ONLY" --zone=%s %s' % (DNAME, ZONE, INAME), 'sleep 10'], - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % (DNAME, INAME, ZONE), - 'r': '127.0.0.1 | success >> 
{"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": true, "detach_only": true, "detached_from_instance": "%s", "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (INAME, INAME, DNAME, ZONE), - 'teardown': ['gcutil detachdisk --zone=%s --device_name=%s %s' % (ZONE, DNAME, INAME)], - }, - {'desc': 'DETACH_ONLY but not instance specified [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s zone=%s detach_only=yes state=absent' % (DNAME, ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must specify an instance name when detaching a disk"}', - }, - {'desc': 'DELETE but disk not found [success]', - 'm': 'gce_pd', - 'a': 'name=%s zone=%s state=absent' % ("missing-disk", ZONE), - 'r': '127.0.0.1 | success >> {"changed": false, "name": "missing-disk", "state": "absent", "zone": "%s"}' % (ZONE), - }, - {'desc': 'DELETE but disk is attached [FAIL]', - 'setup': ['gcutil attachdisk --disk="%s,mode=READ_ONLY" --zone=%s %s' % (DNAME, ZONE, INAME), 'sleep 10'], - 'm': 'gce_pd', - 'a': 'name=%s zone=%s state=absent' % (DNAME, ZONE), - 'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"The disk resource 'projects/%s/zones/%s/disks/%s' is already being used by 'projects/%s/zones/%s/instances/%s'\"}" % (PROJECT, ZONE, DNAME, PROJECT, ZONE, INAME), - 'teardown': ['gcutil detachdisk --zone=%s --device_name=%s %s' % (ZONE, DNAME, INAME)], - }, - {'desc': 'DELETE disk [success]', - 'm': 'gce_pd', - 'a': 'name=%s zone=%s state=absent' % (DNAME, ZONE), - 'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (DNAME, ZONE), - }, - ], - 'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE), - 'sleep 15', - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME, ZONE), - 'sleep 10', - 'gcutil deletedisk -f "%s" --zone=%s' % (DNAME, ZONE), - 'sleep 10'], - }, - - {'id': '02', 'desc': 'Create disk but do not attach (e.g. 
no instance_name param)', - 'setup': [], - 'tests': [ - {'desc': 'CREATE_NO_ATTACH "string" for size_gb [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s size_gb="foo" zone=%s' % (DNAME, ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}', - }, - {'desc': 'CREATE_NO_ATTACH negative size_gb [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s size_gb=-2 zone=%s' % (DNAME, ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}', - }, - {'desc': 'CREATE_NO_ATTACH size_gb exceeds quota [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s size_gb=9999 zone=%s' % ("big-disk", ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Requested disk size exceeds quota"}', - }, - {'desc': 'CREATE_NO_ATTACH create the disk [success]', - 'm': 'gce_pd', - 'a': 'name=%s zone=%s' % (DNAME, ZONE), - 'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "size_gb": 10, "state": "present", "zone": "%s"}' % (DNAME, ZONE), - }, - {'desc': 'CREATE_NO_ATTACH but disk already exists [success]', - 'm': 'gce_pd', - 'a': 'name=%s zone=%s' % (DNAME, ZONE), - 'r': '127.0.0.1 | success >> {"changed": false, "name": "%s", "size_gb": 10, "state": "present", "zone": "%s"}' % (DNAME, ZONE), - }, - ], - 'teardown': ['gcutil deletedisk -f "%s" --zone=%s' % (DNAME, ZONE), - 'sleep 10'], - }, - - {'id': '03', 'desc': 'Create and attach disk', - 'setup': ['gcutil addinstance "%s" --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --persistent_boot_disk=%s' % (INAME2, ZONE, TYPE, NETWORK, SCOPES, IMAGE, USE_PD), - 'gcutil addinstance "%s" --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --persistent_boot_disk=%s' % (INAME, ZONE, "g1-small", NETWORK, SCOPES, IMAGE, USE_PD), - 'gcutil adddisk "%s" --size_gb=2 --zone=%s' % (DNAME, ZONE), - 'gcutil adddisk "%s" --size_gb=2 --zone=%s --wait_until_complete' % (DNAME2, ZONE),], - 'tests': [ - {'desc': 'CREATE_AND_ATTACH "string" for size_gb [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s size_gb="foo" instance_name=%s zone=%s' % (DNAME, INAME, ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}', - }, - {'desc': 'CREATE_AND_ATTACH negative size_gb [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s size_gb=-2 instance_name=%s zone=%s' % (DNAME, INAME, ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}', - }, - {'desc': 'CREATE_AND_ATTACH size_gb exceeds quota [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s size_gb=9999 instance_name=%s zone=%s' % ("big-disk", INAME, ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Requested disk size exceeds quota"}', - }, - {'desc': 'CREATE_AND_ATTACH missing instance [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s' % (DNAME, "missing-instance", ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Instance %s does not exist in zone %s"}' % ("missing-instance", ZONE), - }, - {'desc': 'CREATE_AND_ATTACH disk exists but not attached [success]', - 'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s' % (DNAME, INAME, ZONE), - 'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": true, "name": "%s", "size_gb": 2, 
"state": "present", "zone": "%s"}' % (INAME, DNAME, ZONE), - 'peek_after': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - }, - {'desc': 'CREATE_AND_ATTACH disk exists already attached [success]', - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s' % (DNAME, INAME, ZONE), - 'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": false, "name": "%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME, DNAME, ZONE), - }, - {'desc': 'CREATE_AND_ATTACH attached RO, attempt RO to 2nd inst [success]', - 'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s' % (DNAME, INAME2, ZONE), - 'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": true, "name": "%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME2, DNAME, ZONE), - 'peek_after': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - }, - {'desc': 'CREATE_AND_ATTACH attached RO, attach RW to self [FAILED no-op]', - 'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s mode=READ_WRITE' % (DNAME, INAME, ZONE), - 'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": false, "name": "%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME, DNAME, ZONE), - }, - {'desc': 'CREATE_AND_ATTACH attached RW, attach RW to other [FAIL]', - 'setup': ['gcutil attachdisk --disk=%s,mode=READ_WRITE --zone=%s %s' % (DNAME2, ZONE, INAME), 'sleep 10'], - 'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s mode=READ_WRITE' % (DNAME2, INAME2, ZONE), - 'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[200], API error code[RESOURCE_IN_USE] and message: The disk resource 'projects/%s/zones/%s/disks/%s' is already being used in read-write mode\"}" % (PROJECT, ZONE, DNAME2), - 'peek_after': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - }, - {'desc': 'CREATE_AND_ATTACH attach too many disks to inst [FAIL]', - 'setup': ['gcutil adddisk aa-disk-dummy --size_gb=2 --zone=%s' % (ZONE), - 'gcutil adddisk aa-disk-dummy2 --size_gb=2 --zone=%s --wait_until_complete' % (ZONE), - 'gcutil attachdisk --disk=aa-disk-dummy --zone=%s %s' % (ZONE, INAME), - 'sleep 5'], - 'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s' % ("aa-disk-dummy2", INAME, ZONE), - 'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[200], API error code[LIMIT_EXCEEDED] and message: Exceeded limit 'maximum_persistent_disks' on resource 'projects/%s/zones/%s/instances/%s'. 
Limit: 4\"}" % (PROJECT, ZONE, INAME), - 'teardown': ['gcutil detachdisk --device_name=aa-disk-dummy --zone=%s %s' % (ZONE, INAME), - 'sleep 3', - 'gcutil deletedisk -f aa-disk-dummy --zone=%s' % (ZONE), - 'sleep 10', - 'gcutil deletedisk -f aa-disk-dummy2 --zone=%s' % (ZONE), - 'sleep 10'], - }, - ], - 'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME2, ZONE), - 'sleep 15', - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE), - 'sleep 15', - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME, ZONE), - 'sleep 10', - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME2, ZONE), - 'sleep 10', - 'gcutil deletedisk -f "%s" --zone=%s' % (DNAME, ZONE), - 'sleep 10', - 'gcutil deletedisk -f "%s" --zone=%s' % (DNAME2, ZONE), - 'sleep 10'], - }, - - {'id': '04', 'desc': 'Delete / destroy instances', - 'setup': ['gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME, ZONE, TYPE, IMAGE), - 'gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME2, ZONE, TYPE, IMAGE), - 'gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME3, ZONE, TYPE, IMAGE), - 'gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME4, ZONE, TYPE, IMAGE), - 'gcutil addinstance "%s" --wait_until_running --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME5, ZONE, TYPE, IMAGE)], - 'tests': [ - {'desc': 'DELETE instance, bad zone param [FAIL]', - 'm': 'gce', - 'a': 'name=missing-inst zone=bogus state=absent', - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "value of zone must be one of: us-central1-a,us-central1-b,us-central2-a,europe-west1-a,europe-west1-b, got: bogus"}', - }, - {'desc': 'DELETE non-existent instance, no-op [success]', - 'm': 'gce', - 'a': 'name=missing-inst zone=%s state=absent' % (ZONE), - 'r': '127.0.0.1 | success >> {"changed": false, "name": "missing-inst", "state": "absent", "zone": "%s"}' % (ZONE), - }, - {'desc': 'DELETE an existing named instance [success]', - 'm': 'gce', - 'a': 'name=%s zone=%s state=absent' % (INAME, ZONE), - 'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "state": "absent", "zone": "%s"}' % (INAME, ZONE), - }, - {'desc': 'DELETE list of instances with a non-existent one [success]', - 'm': 'gce', - 'a': 'instance_names=%s,missing,%s zone=%s state=absent' % (INAME2,INAME3, ZONE), - 'r': '127.0.0.1 | success >> {"changed": true, "instance_names": ["%s", "%s"], "state": "absent", "zone": "%s"}' % (INAME2, INAME3, ZONE), - }, - {'desc': 'DELETE list of instances all pre-exist [success]', - 'm': 'gce', - 'a': 'instance_names=%s,%s zone=%s state=absent' % (INAME4,INAME5, ZONE), - 'r': '127.0.0.1 | success >> {"changed": true, "instance_names": ["%s", "%s"], "state": "absent", "zone": "%s"}' % (INAME4, INAME5, ZONE), - }, - ], - 'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME2, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME3, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME4, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME5, ZONE), - 'sleep 10'], - }, - - {'id': '05', 'desc': 'Create instances', - 'setup': ['gcutil adddisk --source_image=%s --zone=%s %s --wait_until_complete' % (IMAGE, ZONE, DNAME7), - 'gcutil addinstance boo --wait_until_running --zone=%s --machine_type=%s --network=%s --disk=%s,mode=READ_WRITE,boot --kernel=%s' % 
(ZONE,TYPE,NETWORK,DNAME7,KERNEL), - ], - 'tests': [ - {'desc': 'CREATE_INSTANCE invalid image arg [FAIL]', - 'm': 'gce', - 'a': 'name=foo image=foo', - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required create instance variable"}', - }, - {'desc': 'CREATE_INSTANCE metadata a list [FAIL]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s zone=%s metadata=\'[\\"foo\\":\\"bar\\",\\"baz\\":1]\'' % (INAME,ZONE), - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata syntax"}', - }, - {'desc': 'CREATE_INSTANCE metadata not a dict [FAIL]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s zone=%s metadata=\\"foo\\":\\"bar\\",\\"baz\\":1' % (INAME,ZONE), - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata syntax"}', - }, - {'desc': 'CREATE_INSTANCE with metadata form1 [FAIL]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s zone=%s metadata=\'{"foo":"bar","baz":1}\'' % (INAME,ZONE), - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata: malformed string"}', - }, - {'desc': 'CREATE_INSTANCE with metadata form2 [FAIL]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s zone=%s metadata={\'foo\':\'bar\',\'baz\':1}' % (INAME,ZONE), - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata: malformed string"}', - }, - {'desc': 'CREATE_INSTANCE with metadata form3 [FAIL]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s zone=%s metadata="foo:bar" '% (INAME,ZONE), - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata syntax"}', - }, - {'desc': 'CREATE_INSTANCE with metadata form4 [FAIL]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s zone=%s metadata="{\'foo\':\'bar\'}"'% (INAME,ZONE), - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata: malformed string"}', - }, - {'desc': 'CREATE_INSTANCE invalid image arg [FAIL]', - 'm': 'gce', - 'a': 'instance_names=foo,bar image=foo', - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required create instance variable"}', - }, - {'desc': 'CREATE_INSTANCE single inst, using defaults [success]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s' % (INAME), - 'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": "debian-7-wheezy-v20130816", "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.175.15", "public_ip": "173.255.120.190", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME, ZONE, INAME, ZONE), - }, - {'desc': 'CREATE_INSTANCE the same instance again, no-op [success]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s' % (INAME), - 'r': '127.0.0.1 | success >> {"changed": false, "instance_data": [{"image": "debian-7-wheezy-v20130816", "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.175.15", "public_ip": "173.255.120.190", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME, ZONE, INAME, ZONE), - }, - {'desc': 'CREATE_INSTANCE instance with alt type [success]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s machine_type=n1-standard-2' % (INAME2), - 'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": "debian-7-wheezy-v20130816", "machine_type": "n1-standard-2", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.192.227", "public_ip": "173.255.121.233", "status": "RUNNING", 
"tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME2, ZONE, INAME2, ZONE), - }, - {'desc': 'CREATE_INSTANCE instance with root pd [success]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s persistent_boot_disk=yes' % (INAME3), - 'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": null, "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.178.140", "public_ip": "173.255.121.176", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME3, ZONE, INAME3, ZONE), - }, - {'desc': 'CREATE_INSTANCE instance with root pd, that already exists [success]', - 'setup': ['gcutil adddisk --source_image=%s --zone=%s %s --wait_until_complete' % (IMAGE, ZONE, DNAME6),], - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s zone=%s persistent_boot_disk=yes' % (INAME6, ZONE), - 'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": null, "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.178.140", "public_ip": "173.255.121.176", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME6, ZONE, INAME6, ZONE), - }, - {'desc': 'CREATE_INSTANCE instance with root pd attached to other inst [FAIL]', - 'm': 'gce', - 'a': 'name=%s zone=%s persistent_boot_disk=yes' % (INAME7, ZONE), - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "Unexpected error attempting to create instance %s, error: The disk resource \'projects/%s/zones/%s/disks/%s\' is already being used in read-write mode"}' % (INAME7,PROJECT,ZONE,DNAME7), - }, - {'desc': 'CREATE_INSTANCE use *all* the options! [success]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'instance_names=%s,%s metadata=\'{\\"foo\\":\\"bar\\", \\"baz\\":1}\' tags=t1,t2,t3 zone=%s image=centos-6-v20130731 persistent_boot_disk=yes' % (INAME4,INAME5,ZONE), - 'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": null, "machine_type": "n1-standard-1", "metadata": {"baz": "1", "foo": "bar"}, "name": "%s", "network": "default", "private_ip": "10.240.130.4", "public_ip": "173.255.121.97", "status": "RUNNING", "tags": ["t1", "t2", "t3"], "zone": "%s"}, {"image": null, "machine_type": "n1-standard-1", "metadata": {"baz": "1", "foo": "bar"}, "name": "%s", "network": "default", "private_ip": "10.240.207.226", "public_ip": "173.255.121.85", "status": "RUNNING", "tags": ["t1", "t2", "t3"], "zone": "%s"}], "instance_names": ["%s", "%s"], "state": "present", "zone": "%s"}' % (INAME4, ZONE, INAME5, ZONE, INAME4, INAME5, ZONE), - }, - ], - 'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME2, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME3, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME4, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME5, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME6, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME7, ZONE), - 'gcutil deleteinstance -f boo --zone=%s' % (ZONE), - 'sleep 10', - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME3, ZONE), - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME4, ZONE), - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME5, ZONE), - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME6, ZONE), - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME7, ZONE), - 'sleep 10'], - }, - - {'id': '06', 'desc': 'Delete / destroy 
networks and firewall rules', - 'setup': ['gcutil addnetwork --range="%s" --gateway="%s" %s' % (CIDR1, GW1, NETWK1), - 'gcutil addnetwork --range="%s" --gateway="%s" %s' % (CIDR2, GW2, NETWK2), - 'sleep 5', - 'gcutil addfirewall --allowed="tcp:80" --network=%s %s' % (NETWK1, FW1), - 'gcutil addfirewall --allowed="tcp:80" --network=%s %s' % (NETWK2, FW2), - 'sleep 5'], - 'tests': [ - {'desc': 'DELETE bogus named firewall [success]', - 'm': 'gce_net', - 'a': 'fwname=missing-fwrule state=absent', - 'r': '127.0.0.1 | success >> {"changed": false, "fwname": "missing-fwrule", "state": "absent"}', - }, - {'desc': 'DELETE bogus named network [success]', - 'm': 'gce_net', - 'a': 'name=missing-network state=absent', - 'r': '127.0.0.1 | success >> {"changed": false, "name": "missing-network", "state": "absent"}', - }, - {'desc': 'DELETE named firewall rule [success]', - 'm': 'gce_net', - 'a': 'fwname=%s state=absent' % (FW1), - 'r': '127.0.0.1 | success >> {"changed": true, "fwname": "%s", "state": "absent"}' % (FW1), - 'teardown': ['sleep 5'], # pause to give GCE time to delete fwrule - }, - {'desc': 'DELETE unused named network [success]', - 'm': 'gce_net', - 'a': 'name=%s state=absent' % (NETWK1), - 'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "state": "absent"}' % (NETWK1), - }, - {'desc': 'DELETE named network *and* fwrule [success]', - 'm': 'gce_net', - 'a': 'name=%s fwname=%s state=absent' % (NETWK2, FW2), - 'r': '127.0.0.1 | success >> {"changed": true, "fwname": "%s", "name": "%s", "state": "absent"}' % (FW2, NETWK2), - }, - ], - 'teardown': ['gcutil deletenetwork -f %s' % (NETWK1), - 'gcutil deletenetwork -f %s' % (NETWK2), - 'sleep 5', - 'gcutil deletefirewall -f %s' % (FW1), - 'gcutil deletefirewall -f %s' % (FW2)], - }, - - {'id': '07', 'desc': 'Create networks and firewall rules', - 'setup': ['gcutil addnetwork --range="%s" --gateway="%s" %s' % (CIDR1, GW1, NETWK1), - 'sleep 5', - 'gcutil addfirewall --allowed="tcp:80" --network=%s %s' % (NETWK1, FW1), - 'sleep 5'], - 'tests': [ - {'desc': 'CREATE network without specifying ipv4_range [FAIL]', - 'm': 'gce_net', - 'a': 'name=fail', - 'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Missing required 'ipv4_range' parameter\"}", - }, - {'desc': 'CREATE network with specifying bad ipv4_range [FAIL]', - 'm': 'gce_net', - 'a': 'name=fail ipv4_range=bad_value', - 'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[400], API error code[None] and message: Invalid value for field 'resource.IPv4Range': 'bad_value'. 
Must be a CIDR address range that is contained in the RFC1918 private address blocks: [10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16]\"}", - }, - {'desc': 'CREATE existing network, not changed [success]', - 'm': 'gce_net', - 'a': 'name=%s ipv4_range=%s' % (NETWK1, CIDR1), - 'r': '127.0.0.1 | success >> {"changed": false, "ipv4_range": "%s", "name": "%s", "state": "present"}' % (CIDR1, NETWK1), - }, - {'desc': 'CREATE new network, changed [success]', - 'm': 'gce_net', - 'a': 'name=%s ipv4_range=%s' % (NETWK2, CIDR2), - 'r': '127.0.0.1 | success >> {"changed": true, "ipv4_range": "10.240.32.0/24", "name": "%s", "state": "present"}' % (NETWK2), - }, - {'desc': 'CREATE new fw rule missing params [FAIL]', - 'm': 'gce_net', - 'a': 'name=%s fwname=%s' % (NETWK1, FW1), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required firewall rule parameter(s)"}', - }, - {'desc': 'CREATE new fw rule bad params [FAIL]', - 'm': 'gce_net', - 'a': 'name=%s fwname=broken allowed=blah src_tags="one,two"' % (NETWK1), - 'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[400], API error code[None] and message: Invalid value for field 'resource.allowed[0].IPProtocol': 'blah'. Must be one of [\\\"tcp\\\", \\\"udp\\\", \\\"icmp\\\"] or an IP protocol number between 0 and 255\"}", - }, - {'desc': 'CREATE existing fw rule [success]', - 'm': 'gce_net', - 'a': 'name=%s fwname=%s allowed="tcp:80" src_tags="one,two"' % (NETWK1, FW1), - 'r': '127.0.0.1 | success >> {"allowed": "tcp:80", "changed": false, "fwname": "%s", "ipv4_range": "%s", "name": "%s", "src_range": null, "src_tags": ["one", "two"], "state": "present"}' % (FW1, CIDR1, NETWK1), - }, - {'desc': 'CREATE new fw rule [success]', - 'm': 'gce_net', - 'a': 'name=%s fwname=%s allowed="tcp:80" src_tags="one,two"' % (NETWK1, FW3), - 'r': '127.0.0.1 | success >> {"allowed": "tcp:80", "changed": true, "fwname": "%s", "ipv4_range": "%s", "name": "%s", "src_range": null, "src_tags": ["one", "two"], "state": "present"}' % (FW3, CIDR1, NETWK1), - }, - {'desc': 'CREATE new network *and* fw rule [success]', - 'm': 'gce_net', - 'a': 'name=%s ipv4_range=%s fwname=%s allowed="tcp:80" src_tags="one,two"' % (NETWK3, CIDR3, FW4), - 'r': '127.0.0.1 | success >> {"allowed": "tcp:80", "changed": true, "fwname": "%s", "ipv4_range": "%s", "name": "%s", "src_range": null, "src_tags": ["one", "two"], "state": "present"}' % (FW4, CIDR3, NETWK3), - }, - ], - 'teardown': ['gcutil deletefirewall -f %s' % (FW1), - 'gcutil deletefirewall -f %s' % (FW2), - 'gcutil deletefirewall -f %s' % (FW3), - 'gcutil deletefirewall -f %s' % (FW4), - 'sleep 5', - 'gcutil deletenetwork -f %s' % (NETWK1), - 'gcutil deletenetwork -f %s' % (NETWK2), - 'gcutil deletenetwork -f %s' % (NETWK3), - 'sleep 5'], - }, - - {'id': '08', 'desc': 'Create load-balancer resources', - 'setup': ['gcutil addinstance "%s" --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --nopersistent_boot_disk' % (INAME, ZONE, TYPE, NETWORK, SCOPES, IMAGE), - 'gcutil addinstance "%s" --wait_until_running --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --nopersistent_boot_disk' % (INAME2, ZONE, TYPE, NETWORK, SCOPES, IMAGE), - ], - 'tests': [ - {'desc': 'Do nothing [FAIL]', - 'm': 'gce_lb', - 'a': 'httphealthcheck_port=7', - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Nothing to do, please specify a \\\"name\\\" or \\\"httphealthcheck_name\\\" parameter"}', - 
}, - {'desc': 'CREATE_HC create basic http healthcheck [success]', - 'm': 'gce_lb', - 'a': 'httphealthcheck_name=%s' % (HC1), - 'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_healthy_count": 2, "httphealthcheck_host": null, "httphealthcheck_interval": 5, "httphealthcheck_name": "%s", "httphealthcheck_path": "/", "httphealthcheck_port": 80, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "name": null, "state": "present"}' % (HC1), - }, - {'desc': 'CREATE_HC (repeat, no-op) create basic http healthcheck [success]', - 'm': 'gce_lb', - 'a': 'httphealthcheck_name=%s' % (HC1), - 'r': '127.0.0.1 | success >> {"changed": false, "httphealthcheck_healthy_count": 2, "httphealthcheck_host": null, "httphealthcheck_interval": 5, "httphealthcheck_name": "%s", "httphealthcheck_path": "/", "httphealthcheck_port": 80, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "name": null, "state": "present"}' % (HC1), - }, - {'desc': 'CREATE_HC create custom http healthcheck [success]', - 'm': 'gce_lb', - 'a': 'httphealthcheck_name=%s httphealthcheck_port=1234 httphealthcheck_path="/whatup" httphealthcheck_host="foo" httphealthcheck_interval=300' % (HC2), - 'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_healthy_count": 2, "httphealthcheck_host": "foo", "httphealthcheck_interval": 300, "httphealthcheck_name": "%s", "httphealthcheck_path": "/whatup", "httphealthcheck_port": 1234, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "name": null, "state": "present"}' % (HC2), - }, - {'desc': 'CREATE_HC create (broken) custom http healthcheck [FAIL]', - 'm': 'gce_lb', - 'a': 'httphealthcheck_name=%s httphealthcheck_port="string" httphealthcheck_path=7' % (HC3), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Unexpected response: HTTP return_code[400], API error code[None] and message: Invalid value for: Expected a signed integer, got \'string\' (class java.lang.String)"}', - }, - {'desc': 'CREATE_LB create lb, missing region [FAIL]', - 'm': 'gce_lb', - 'a': 'name=%s' % (LB1), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required region name"}', - }, - {'desc': 'CREATE_LB create lb, bogus region [FAIL]', - 'm': 'gce_lb', - 'a': 'name=%s region=bogus' % (LB1), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Unexpected response: HTTP return_code[404], API error code[None] and message: The resource \'projects/%s/regions/bogus\' was not found"}' % (PROJECT), - }, - {'desc': 'CREATE_LB create lb, minimal params [success]', - 'strip_numbers': True, - 'm': 'gce_lb', - 'a': 'name=%s region=%s' % (LB1, REGION), - 'r': '127.0.0.1 | success >> {"changed": true, "external_ip": "173.255.123.245", "httphealthchecks": [], "members": [], "name": "%s", "port_range": "1-65535", "protocol": "tcp", "region": "%s", "state": "present"}' % (LB1, REGION), - }, - {'desc': 'CREATE_LB create lb full params [success]', - 'strip_numbers': True, - 'm': 'gce_lb', - 'a': 'httphealthcheck_name=%s httphealthcheck_port=5055 httphealthcheck_path="/howami" name=%s port_range=8000-8888 region=%s members=%s/%s,%s/%s' % (HC3,LB2,REGION,ZONE,INAME,ZONE,INAME2), - 'r': '127.0.0.1 | success >> {"changed": true, "external_ip": "173.255.126.81", "httphealthcheck_healthy_count": 2, "httphealthcheck_host": null, "httphealthcheck_interval": 5, "httphealthcheck_name": "%s", "httphealthcheck_path": "/howami", "httphealthcheck_port": 5055, "httphealthcheck_timeout": 5, 
"httphealthcheck_unhealthy_count": 2, "httphealthchecks": ["%s"], "members": ["%s/%s", "%s/%s"], "name": "%s", "port_range": "8000-8888", "protocol": "tcp", "region": "%s", "state": "present"}' % (HC3,HC3,ZONE,INAME,ZONE,INAME2,LB2,REGION), - }, - ], - 'teardown': [ - 'gcutil deleteinstance --zone=%s -f %s %s' % (ZONE, INAME, INAME2), - 'gcutil deleteforwardingrule --region=%s -f %s %s' % (REGION, LB1, LB2), - 'sleep 10', - 'gcutil deletetargetpool --region=%s -f %s-tp %s-tp' % (REGION, LB1, LB2), - 'sleep 10', - 'gcutil deletehttphealthcheck -f %s %s %s' % (HC1, HC2, HC3), - ], - }, - - {'id': '09', 'desc': 'Destroy load-balancer resources', - 'setup': ['gcutil addhttphealthcheck %s' % (HC1), - 'sleep 5', - 'gcutil addhttphealthcheck %s' % (HC2), - 'sleep 5', - 'gcutil addtargetpool --health_checks=%s --region=%s %s-tp' % (HC1, REGION, LB1), - 'sleep 5', - 'gcutil addforwardingrule --target=%s-tp --region=%s %s' % (LB1, REGION, LB1), - 'sleep 5', - 'gcutil addtargetpool --region=%s %s-tp' % (REGION, LB2), - 'sleep 5', - 'gcutil addforwardingrule --target=%s-tp --region=%s %s' % (LB2, REGION, LB2), - 'sleep 5', - ], - 'tests': [ - {'desc': 'DELETE_LB: delete a non-existent LB [success]', - 'm': 'gce_lb', - 'a': 'name=missing state=absent', - 'r': '127.0.0.1 | success >> {"changed": false, "name": "missing", "state": "absent"}', - }, - {'desc': 'DELETE_LB: delete a non-existent LB+HC [success]', - 'm': 'gce_lb', - 'a': 'name=missing httphealthcheck_name=alsomissing state=absent', - 'r': '127.0.0.1 | success >> {"changed": false, "httphealthcheck_name": "alsomissing", "name": "missing", "state": "absent"}', - }, - {'desc': 'DELETE_LB: destroy standalone healthcheck [success]', - 'm': 'gce_lb', - 'a': 'httphealthcheck_name=%s state=absent' % (HC2), - 'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_name": "%s", "name": null, "state": "absent"}' % (HC2), - }, - {'desc': 'DELETE_LB: destroy standalone balancer [success]', - 'm': 'gce_lb', - 'a': 'name=%s state=absent' % (LB2), - 'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "state": "absent"}' % (LB2), - }, - {'desc': 'DELETE_LB: destroy LB+HC [success]', - 'm': 'gce_lb', - 'a': 'name=%s httphealthcheck_name=%s state=absent' % (LB1, HC1), - 'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_name": "%s", "name": "%s", "state": "absent"}' % (HC1,LB1), - }, - ], - 'teardown': [ - 'gcutil deleteforwardingrule --region=%s -f %s %s' % (REGION, LB1, LB2), - 'sleep 10', - 'gcutil deletetargetpool --region=%s -f %s-tp %s-tp' % (REGION, LB1, LB2), - 'sleep 10', - 'gcutil deletehttphealthcheck -f %s %s' % (HC1, HC2), - ], - }, -] - -def main(tests_to_run=[]): - for test in test_cases: - if tests_to_run and test['id'] not in tests_to_run: - continue - print "=> starting/setup '%s:%s'"% (test['id'], test['desc']) - if DEBUG: print "=debug>", test['setup'] - for c in test['setup']: - (s,o) = run(c) - test_i = 1 - for t in test['tests']: - if DEBUG: print "=>debug>", test_i, t['desc'] - # run any test-specific setup commands - if t.has_key('setup'): - for setup in t['setup']: - (status, output) = run(setup) - - # run any 'peek_before' commands - if t.has_key('peek_before') and PEEKING_ENABLED: - for setup in t['peek_before']: - (status, output) = run(setup) - - # run the ansible test if 'a' exists, otherwise - # an empty 'a' directive allows test to run - # setup/teardown for a subsequent test. 
- if t['a']: - if DEBUG: print "=>debug>", t['m'], t['a'] - acmd = "ansible all -o -m %s -a \"%s\"" % (t['m'],t['a']) - #acmd = "ANSIBLE_KEEP_REMOTE_FILES=1 ansible all -vvv -m %s -a \"%s\"" % (t['m'],t['a']) - (s,o) = run(acmd) - - # check expected output - if DEBUG: print "=debug>", o.strip(), "!=", t['r'] - print "=> %s.%02d '%s':" % (test['id'], test_i, t['desc']), - if t.has_key('strip_numbers'): - # strip out all numbers so we don't trip over different - # IP addresses - is_good = (o.strip().translate(None, "0123456789") == t['r'].translate(None, "0123456789")) - else: - is_good = (o.strip() == t['r']) - - if is_good: - print "PASS" - else: - print "FAIL" - if VERBOSE: - print "=>", acmd - print "=> Expected:", t['r'] - print "=> Got:", o.strip() - - # run any 'peek_after' commands - if t.has_key('peek_after') and PEEKING_ENABLED: - for setup in t['peek_after']: - (status, output) = run(setup) - - # run any test-specific teardown commands - if t.has_key('teardown'): - for td in t['teardown']: - (status, output) = run(td) - test_i += 1 - - print "=> completing/teardown '%s:%s'" % (test['id'], test['desc']) - if DEBUG: print "=debug>", test['teardown'] - for c in test['teardown']: - (s,o) = run(c) - - -if __name__ == '__main__': - tests_to_run = [] - if len(sys.argv) == 2: - if sys.argv[1] in ["--help", "--list"]: - print "usage: %s [id1,id2,...,idN]" % sys.argv[0] - print " * An empty argument list will execute all tests" - print " * Tests do not need to be specified in numerical order" - print " * List test categories with --list or --help" - print "" - for test in test_cases: - print "\t%s:%s" % (test['id'], test['desc']) - sys.exit(0) - else: - tests_to_run = sys.argv[1].split(',') - main(tests_to_run) diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py index 2585fdc30f3..27e79a41cad 100644 --- a/lib/ansible/__init__.py +++ b/lib/ansible/__init__.py @@ -14,5 +14,5 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. -__version__ = '1.8' +__version__ = '1.9' __author__ = 'Michael DeHaan' diff --git a/lib/ansible/cache/jsonfile.py b/lib/ansible/cache/jsonfile.py new file mode 100644 index 00000000000..b7d72c8d2e8 --- /dev/null +++ b/lib/ansible/cache/jsonfile.py @@ -0,0 +1,141 @@ +# (c) 2014, Brian Coca, Josh Drake, et al +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +import os +import time +import errno + +try: + import simplejson as json +except ImportError: + import json + +from ansible import constants as C +from ansible import utils +from ansible.cache.base import BaseCacheModule + +class CacheModule(BaseCacheModule): + """ + A caching module backed by json files.
+ """ + def __init__(self, *args, **kwargs): + + self._timeout = float(C.CACHE_PLUGIN_TIMEOUT) + self._cache = {} + self._cache_dir = C.CACHE_PLUGIN_CONNECTION # expects a dir path + if not self._cache_dir: + utils.exit("error, fact_caching_connection is not set, cannot use fact cache") + + if not os.path.exists(self._cache_dir): + try: + os.makedirs(self._cache_dir) + except (OSError,IOError), e: + utils.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, str(e))) + return None + + def get(self, key): + + if key in self._cache: + return self._cache.get(key) + + if self.has_expired(key): + raise KeyError + + cachefile = "%s/%s" % (self._cache_dir, key) + try: + f = open( cachefile, 'r') + except (OSError,IOError), e: + utils.warning("error while trying to write to %s : %s" % (cachefile, str(e))) + else: + value = json.load(f) + self._cache[key] = value + return value + finally: + f.close() + + def set(self, key, value): + + self._cache[key] = value + + cachefile = "%s/%s" % (self._cache_dir, key) + try: + f = open(cachefile, 'w') + except (OSError,IOError), e: + utils.warning("error while trying to read %s : %s" % (cachefile, str(e))) + else: + f.write(utils.jsonify(value)) + finally: + f.close() + + def has_expired(self, key): + + cachefile = "%s/%s" % (self._cache_dir, key) + try: + st = os.stat(cachefile) + except (OSError,IOError), e: + if e.errno == errno.ENOENT: + return False + else: + utils.warning("error while trying to stat %s : %s" % (cachefile, str(e))) + + if time.time() - st.st_mtime <= self._timeout: + return False + + if key in self._cache: + del self._cache[key] + return True + + def keys(self): + keys = [] + for k in os.listdir(self._cache_dir): + if not (k.startswith('.') or self.has_expired(k)): + keys.append(k) + return keys + + def contains(self, key): + + if key in self._cache: + return True + + if self.has_expired(key): + return False + try: + st = os.stat("%s/%s" % (self._cache_dir, key)) + return True + except (OSError,IOError), e: + if e.errno == errno.ENOENT: + return False + else: + utils.warning("error while trying to stat %s : %s" % (cachefile, str(e))) + + def delete(self, key): + del self._cache[key] + try: + os.remove("%s/%s" % (self._cache_dir, key)) + except (OSError,IOError), e: + pass #TODO: only pass on non existing? + + def flush(self): + self._cache = {} + for key in self.keys(): + self.delete(key) + + def copy(self): + ret = dict() + for key in self.keys(): + ret[key] = self.get(key) + return ret diff --git a/lib/ansible/cache/redis.py b/lib/ansible/cache/redis.py index c55b74469de..7ae5ef74c16 100644 --- a/lib/ansible/cache/redis.py +++ b/lib/ansible/cache/redis.py @@ -20,9 +20,14 @@ import collections # FIXME: can we store these as something else before we ship it? 
import sys import time -import json + +try: + import simplejson as json +except ImportError: + import json from ansible import constants as C +from ansible.utils import jsonify from ansible.cache.base import BaseCacheModule try: @@ -65,7 +70,7 @@ class CacheModule(BaseCacheModule): return json.loads(value) def set(self, key, value): - value2 = json.dumps(value) + value2 = jsonify(value) if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire' self._cache.setex(self._make_key(key), int(self._timeout), value2) else: diff --git a/lib/ansible/callbacks.py b/lib/ansible/callbacks.py index 2042dbc93a9..3c0a4fe52e9 100644 --- a/lib/ansible/callbacks.py +++ b/lib/ansible/callbacks.py @@ -27,6 +27,7 @@ import fcntl import constants import locale from ansible.color import stringc +from ansible.module_utils import basic import logging if constants.DEFAULT_LOG_PATH != '': @@ -411,7 +412,7 @@ class CliRunnerCallbacks(DefaultRunnerCallbacks): self._async_notified[jid] = clock + 1 if self._async_notified[jid] > clock: self._async_notified[jid] = clock - display("<job %s> polling, %ss remaining" % (jid, clock), runner=self.runner) + display("<job %s> polling on %s, %ss remaining" % (jid, host, clock), runner=self.runner) super(CliRunnerCallbacks, self).on_async_poll(host, res, jid, clock) def on_async_ok(self, host, res, jid): @@ -450,13 +451,18 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks): self._async_notified = {} def on_unreachable(self, host, results): - delegate_to = self.runner.module_vars.get('delegate_to') - if delegate_to: - host = '%s -> %s' % (host, delegate_to) + if self.runner.delegate_to: + host = '%s -> %s' % (host, self.runner.delegate_to) item = None if type(results) == dict: item = results.get('item', None) + if isinstance(item, unicode): + item = utils.to_bytes(item) + results = basic.json_dict_unicode_to_bytes(results) + else: + results = utils.to_bytes(results) + host = utils.to_bytes(host) if item: msg = "fatal: [%s] => (item=%s) => %s" % (host, item, results) else: @@ -465,9 +471,8 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks): super(PlaybookRunnerCallbacks, self).on_unreachable(host, results) def on_failed(self, host, results, ignore_errors=False): - delegate_to = self.runner.module_vars.get('delegate_to') - if delegate_to: - host = '%s -> %s' % (host, delegate_to) + if self.runner.delegate_to: + host = '%s -> %s' % (host, self.runner.delegate_to) results2 = results.copy() results2.pop('invocation', None) @@ -500,9 +505,8 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks): super(PlaybookRunnerCallbacks, self).on_failed(host, results, ignore_errors=ignore_errors) def on_ok(self, host, host_result): - delegate_to = self.runner.module_vars.get('delegate_to') - if delegate_to: - host = '%s -> %s' % (host, delegate_to) + if self.runner.delegate_to: + host = '%s -> %s' % (host, self.runner.delegate_to) item = host_result.get('item', None) @@ -542,9 +546,8 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks): super(PlaybookRunnerCallbacks, self).on_ok(host, host_result) def on_skipped(self, host, item=None): - delegate_to = self.runner.module_vars.get('delegate_to') - if delegate_to: - host = '%s -> %s' % (host, delegate_to) + if self.runner.delegate_to: + host = '%s -> %s' % (host, self.runner.delegate_to) if constants.DISPLAY_SKIPPED_HOSTS: msg = '' @@ -607,11 +610,13 @@ class PlaybookCallbacks(object): call_callback_module('playbook_on_no_hosts_remaining') def on_task_start(self, name, is_conditional): + name = utils.to_bytes(name) msg = "TASK: [%s]"
% name if is_conditional: msg = "NOTIFIED: [%s]" % name if hasattr(self, 'start_at'): + self.start_at = utils.to_bytes(self.start_at) if name == self.start_at or fnmatch.fnmatch(name, self.start_at): # we found our match, we can get rid of this now del self.start_at @@ -624,7 +629,13 @@ if hasattr(self, 'start_at'): # we still have start_at so skip the task self.skip_task = True elif hasattr(self, 'step') and self.step: - msg = ('Perform task: %s (y/n/c): ' % name).encode(sys.stdout.encoding) + if isinstance(name, str): + name = utils.to_unicode(name) + msg = u'Perform task: %s (y/n/c): ' % name + if sys.stdout.encoding: + msg = msg.encode(sys.stdout.encoding, errors='replace') + else: + msg = msg.encode('utf-8') resp = raw_input(msg) if resp.lower() in ['y','yes']: self.skip_task = False @@ -674,7 +685,7 @@ class PlaybookCallbacks(object): result = prompt(msg, private) # if result is false and default is not None - if not result and default: + if not result and default is not None: result = default diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 8c342497c13..31dc91463e8 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -86,26 +86,13 @@ def shell_expand_path(path): path = os.path.expanduser(os.path.expandvars(path)) return path +def get_plugin_paths(path): + return ':'.join([os.path.join(x, path) for x in [os.path.expanduser('~/.ansible/plugins/'), '/usr/share/ansible_plugins/']]) + p = load_config_file() active_user = pwd.getpwuid(os.geteuid())[0] -# Needed so the RPM can call setup.py and have modules land in the -# correct location. See #1277 for discussion -if getattr(sys, "real_prefix", None): - # in a virtualenv - DIST_MODULE_PATH = os.path.join(sys.prefix, 'share/ansible/') -else: - DIST_MODULE_PATH = '/usr/share/ansible/' - -# Look for modules relative to this file path -# This is so that we can find the modules when running from a local checkout -# installed as editable with `pip install -e ...` or `python setup.py develop` -local_module_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), '..', '..', 'library') -) -DIST_MODULE_PATH = os.pathsep.join([DIST_MODULE_PATH, local_module_path]) - # check all of these extensions when looking for yaml files for things like # group variables -- really anything we can load YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ] @@ -114,8 +101,8 @@ YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ] DEFAULTS='defaults' # configurable things -DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts')) -DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', DIST_MODULE_PATH) +DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', get_config(p, DEFAULTS,'hostfile','ANSIBLE_HOSTS', '/etc/ansible/hosts'))) +DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None) DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles')) DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp') DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command') @@ -151,13 +138,13 @@ DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER' DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) DEFAULT_GATHERING = get_config(p, DEFAULTS,
'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() -DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '/usr/share/ansible_plugins/action_plugins') -DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '/usr/share/ansible_plugins/cache_plugins') -DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '/usr/share/ansible_plugins/callback_plugins') -DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '/usr/share/ansible_plugins/connection_plugins') -DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '/usr/share/ansible_plugins/lookup_plugins') -DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '/usr/share/ansible_plugins/vars_plugins') -DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '/usr/share/ansible_plugins/filter_plugins') +DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', get_plugin_paths('action_plugins')) +DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', get_plugin_paths('cache_plugins')) +DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', get_plugin_paths('callback_plugins')) +DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', get_plugin_paths('connection_plugins')) +DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', get_plugin_paths('lookup_plugins')) +DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', get_plugin_paths('vars_plugins')) +DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', get_plugin_paths('filter_plugins')) DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') @@ -177,6 +164,9 @@ DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', ' COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True) DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) +RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) +RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') + # CONNECTION RELATED ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None) ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 972042c9b14..2048046d3c1 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -420,7 +420,7 @@ class Inventory(object): group = self.get_group(groupname) if group is None: - raise Exception("group not found: %s" % groupname) + raise errors.AnsibleError("group not found: %s" % groupname) vars = {} @@ -437,7 +437,10 @@ class Inventory(object): def get_variables(self, hostname, 
update_cached=False, vault_password=None): - return self.get_host(hostname).get_variables() + host = self.get_host(hostname) + if not host: + raise errors.AnsibleError("host not found: %s" % hostname) + return host.get_variables() def get_host_variables(self, hostname, update_cached=False, vault_password=None): diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py index 3848696006e..2c05253bb3a 100644 --- a/lib/ansible/inventory/ini.py +++ b/lib/ansible/inventory/ini.py @@ -36,6 +36,7 @@ class InventoryParser(object): def __init__(self, filename=C.DEFAULT_HOST_LIST): with open(filename) as fh: + self.filename = filename self.lines = fh.readlines() self.groups = {} self.hosts = {} @@ -87,8 +88,8 @@ class InventoryParser(object): self.groups = dict(all=all, ungrouped=ungrouped) active_group_name = 'ungrouped' - for line in self.lines: - line = utils.before_comment(line).strip() + for lineno in range(len(self.lines)): + line = utils.before_comment(self.lines[lineno]).strip() if line.startswith("[") and line.endswith("]"): active_group_name = line.replace("[","").replace("]","") if ":vars" in line or ":children" in line: @@ -142,7 +143,7 @@ class InventoryParser(object): try: (k,v) = t.split("=", 1) except ValueError, e: - raise errors.AnsibleError("Invalid ini entry: %s - %s" % (t, str(e))) + raise errors.AnsibleError("%s:%s: Invalid ini entry: %s - %s" % (self.filename, lineno + 1, t, str(e))) host.set_variable(k, self._parse_value(v)) self.groups[active_group_name].add_host(host) @@ -153,8 +154,8 @@ class InventoryParser(object): def _parse_group_children(self): group = None - for line in self.lines: - line = line.strip() + for lineno in range(len(self.lines)): + line = self.lines[lineno].strip() if line is None or line == '': continue if line.startswith("[") and ":children]" in line: @@ -169,7 +170,7 @@ class InventoryParser(object): elif group: kid_group = self.groups.get(line, None) if kid_group is None: - raise errors.AnsibleError("child group is not defined: (%s)" % line) + raise errors.AnsibleError("%s:%d: child group is not defined: (%s)" % (self.filename, lineno + 1, line)) else: group.add_child_group(kid_group) @@ -180,13 +181,13 @@ class InventoryParser(object): def _parse_group_variables(self): group = None - for line in self.lines: - line = line.strip() + for lineno in range(len(self.lines)): + line = self.lines[lineno].strip() if line.startswith("[") and ":vars]" in line: line = line.replace("[","").replace(":vars]","") group = self.groups.get(line, None) if group is None: - raise errors.AnsibleError("can't add vars to undefined group: %s" % line) + raise errors.AnsibleError("%s:%d: can't add vars to undefined group: %s" % (self.filename, lineno + 1, line)) elif line.startswith("#") or line.startswith(";"): pass elif line.startswith("["): @@ -195,7 +196,7 @@ class InventoryParser(object): pass elif group: if "=" not in line: - raise errors.AnsibleError("variables assigned to group must be in key=value form") + raise errors.AnsibleError("%s:%d: variables assigned to group must be in key=value form" % (self.filename, lineno + 1)) else: (k, v) = [e.strip() for e in line.split("=", 1)] group.set_variable(k, self._parse_value(v)) diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py index 723089db88f..9b8d72de41f 100644 --- a/lib/ansible/inventory/script.py +++ b/lib/ansible/inventory/script.py @@ -22,10 +22,12 @@ import subprocess import ansible.constants as C from ansible.inventory.host import Host from ansible.inventory.group import 
Group +from ansible.module_utils.basic import json_dict_bytes_to_unicode from ansible import utils from ansible import errors import sys + class InventoryScript(object): ''' Host inventory parser for ansible using external inventory scripts. ''' @@ -41,6 +43,10 @@ class InventoryScript(object): except OSError, e: raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) (stdout, stderr) = sp.communicate() + + if sp.returncode != 0: + raise errors.AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr)) + self.data = stdout # see comment about _meta below self.host_vars_from_top = None @@ -53,6 +59,7 @@ class InventoryScript(object): # not passing from_remote because data from CMDB is trusted self.raw = utils.parse_json(self.data) + self.raw = json_dict_bytes_to_unicode(self.raw) all = Group('all') groups = dict(all=all) @@ -141,7 +148,7 @@ class InventoryScript(object): if out.strip() == '': return dict() try: - return utils.parse_json(out) + return json_dict_bytes_to_unicode(utils.parse_json(out)) except ValueError: raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out)) diff --git a/lib/ansible/module_common.py b/lib/ansible/module_common.py index 8beff78d07d..5e3732e9677 100644 --- a/lib/ansible/module_common.py +++ b/lib/ansible/module_common.py @@ -151,11 +151,18 @@ class ModuleReplacer(object): complex_args_json = utils.jsonify(complex_args) # We force conversion of module_args to str because module_common calls shlex.split, # a standard library function that incorrectly handles Unicode input before Python 2.7.3. + # Note: it would be better to do all this conversion at the border + # (when the data is originally parsed into data structures) but + # it's currently coming from too many sources to make that + # effective. try: encoded_args = repr(module_args.encode('utf-8')) except UnicodeDecodeError: encoded_args = repr(module_args) - encoded_complex = repr(complex_args_json) + try: + encoded_complex = repr(complex_args_json.encode('utf-8')) + except UnicodeDecodeError: + encoded_complex = repr(complex_args_json) # these strings should be part of the 'basic' snippet which is required to be included module_data = module_data.replace(REPLACER_VERSION, repr(__version__)) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 17e2773e5b6..8b14536ab53 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -87,10 +87,19 @@ except ImportError: HAVE_HASHLIB=False try: - from hashlib import md5 as _md5 + from hashlib import sha1 as _sha1 HAVE_HASHLIB=True except ImportError: - from md5 import md5 as _md5 + from sha import sha as _sha1 + +try: + from hashlib import md5 as _md5 +except ImportError: + try: + from md5 import md5 as _md5 + except ImportError: + # MD5 unavailable. Possibly FIPS mode + _md5 = None try: from hashlib import sha256 as _sha256 @@ -151,6 +160,7 @@ FILE_COMMON_ARGUMENTS=dict( serole = dict(), selevel = dict(), setype = dict(), + follow = dict(type='bool', default=False), # not taken by the file module, but other modules call file so it must ignore them. content = dict(no_log=True), backup = dict(), @@ -161,6 +171,7 @@ FILE_COMMON_ARGUMENTS=dict( directory_mode = dict(), # used by copy ) +PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?') def get_platform(): ''' what's the platform? example: Linux is a platform.
''' @@ -222,6 +233,103 @@ def load_platform_subclass(cls, *args, **kwargs): return super(cls, subclass).__new__(subclass) + + +def json_dict_unicode_to_bytes(d): + ''' Recursively convert dict keys and values to byte str + + Specialized for json return because this only handles lists, tuples, + and dict container types (the containers that the json module returns) + ''' + + if isinstance(d, unicode): + return d.encode('utf-8') + elif isinstance(d, dict): + return dict(map(json_dict_unicode_to_bytes, d.iteritems())) + elif isinstance(d, list): + return list(map(json_dict_unicode_to_bytes, d)) + elif isinstance(d, tuple): + return tuple(map(json_dict_unicode_to_bytes, d)) + else: + return d + +def json_dict_bytes_to_unicode(d): + ''' Recursively convert dict keys and values to unicode + + Specialized for json return because this only handles lists, tuples, + and dict container types (the containers that the json module returns) + ''' + + if isinstance(d, str): + return unicode(d, 'utf-8') + elif isinstance(d, dict): + return dict(map(json_dict_bytes_to_unicode, d.iteritems())) + elif isinstance(d, list): + return list(map(json_dict_bytes_to_unicode, d)) + elif isinstance(d, tuple): + return tuple(map(json_dict_bytes_to_unicode, d)) + else: + return d + +def heuristic_log_sanitize(data): + ''' Remove strings that look like passwords from log messages ''' + # Currently filters: + # user:pass@foo/whatever and http://username:pass@wherever/foo + # This code has false positives and consumes parts of logs that are + # not passwds + + # begin: start of a passwd containing string + # end: end of a passwd containing string + # sep: char between user and passwd + # prev_begin: where in the overall string to start a search for + # a passwd + # sep_search_end: where in the string to end a search for the sep + output = [] + begin = len(data) + prev_begin = begin + sep = 1 + while sep: + # Find the potential end of a passwd + try: + end = data.rindex('@', 0, begin) + except ValueError: + # No passwd in the rest of the data + output.insert(0, data[0:begin]) + break + + # Search for the beginning of a passwd + sep = None + sep_search_end = end + while not sep: + # URL-style username+password + try: + begin = data.rindex('://', 0, sep_search_end) + except ValueError: + # No url style in the data, check for ssh style in the + # rest of the string + begin = 0 + # Search for separator + try: + sep = data.index(':', begin + 3, end) + except ValueError: + # No separator; choices: + if begin == 0: + # Searched the whole string so there's no password + # here. Return the remaining data + output.insert(0, data[0:begin]) + break + # Search for a different beginning of the password field. + sep_search_end = begin + continue + if sep: + # Password was found; remove it.
+ output.insert(0, data[end:prev_begin]) + output.insert(0, '********') + output.insert(0, data[begin:sep + 1]) + prev_begin = begin + + return ''.join(output) + + class AnsibleModule(object): def __init__(self, argument_spec, bypass_checks=False, no_log=False, @@ -295,6 +403,11 @@ class AnsibleModule(object): else: path = os.path.expanduser(path) + # if the path is a symlink, and we're following links, get + # the target of the link instead for testing + if params.get('follow', False) and os.path.islink(path): + path = os.path.realpath(path) + mode = params.get('mode', None) owner = params.get('owner', None) group = params.get('group', None) @@ -962,69 +1075,10 @@ class AnsibleModule(object): if k in params: self.fail_json(msg="duplicate parameter: %s (value=%s)" % (k, v)) params[k] = v - params2 = json.loads(MODULE_COMPLEX_ARGS) + params2 = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) params2.update(params) return (params2, args) - def _heuristic_log_sanitize(self, data): - ''' Remove strings that look like passwords from log messages ''' - # Currently filters: - # user:pass@foo/whatever and http://username:pass@wherever/foo - # This code has false positives and consumes parts of logs that are - # not passwds - - # begin: start of a passwd containing string - # end: end of a passwd containing string - # sep: char between user and passwd - # prev_begin: where in the overall string to start a search for - # a passwd - # sep_search_end: where in the string to end a search for the sep - output = [] - begin = len(data) - prev_begin = begin - sep = 1 - while sep: - # Find the potential end of a passwd - try: - end = data.rindex('@', 0, begin) - except ValueError: - # No passwd in the rest of the data - output.insert(0, data[0:begin]) - break - - # Search for the beginning of a passwd - sep = None - sep_search_end = end - while not sep: - # URL-style username+password - try: - begin = data.rindex('://', 0, sep_search_end) - except ValueError: - # No url style in the data, check for ssh style in the - # rest of the string - begin = 0 - # Search for separator - try: - sep = data.index(':', begin + 3, end) - except ValueError: - # No separator; choices: - if begin == 0: - # Searched the whole string so there's no password - # here. Return the remaining data - output.insert(0, data[0:begin]) - break - # Search for a different beginning of the password field. - sep_search_end = begin - continue - if sep: - # Password was found; remove it. 
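With the sanitizer promoted to a module-level function (its old method form is what is being deleted here), run_command can reuse it on arguments and stderr. A behaviour sketch for the two credential shapes it targets:

```python
from ansible.module_utils.basic import heuristic_log_sanitize

# URL-style credentials: the text between ':' and '@' is masked.
print(heuristic_log_sanitize('https://deploy:s3cret@repo.example.com/x.git'))
# -> https://deploy:********@repo.example.com/x.git

# Bare user:pass@host (ssh style) gets the same treatment.
print(heuristic_log_sanitize('deploy:s3cret@repo.example.com'))
# -> deploy:********@repo.example.com
```

As the docstring warns, the heuristic has false positives: a log line containing ':' followed later by '@' can lose the text in between.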
- output.insert(0, data[end:prev_begin]) - output.insert(0, '********') - output.insert(0, data[begin:sep + 1]) - prev_begin = begin - - return ''.join(output) - def _log_invocation(self): ''' log that ansible ran the module ''' # TODO: generalize a separate log function and make log_invocation use it @@ -1047,7 +1101,7 @@ class AnsibleModule(object): param_val = str(param_val) elif isinstance(param_val, unicode): param_val = param_val.encode('utf-8') - log_args[param] = self._heuristic_log_sanitize(param_val) + log_args[param] = heuristic_log_sanitize(param_val) module = 'ansible-%s' % os.path.basename(__file__) msg = [] @@ -1069,12 +1123,11 @@ class AnsibleModule(object): msg = msg.encode('utf-8') if (has_journal): - journal_args = ["MESSAGE=%s %s" % (module, msg)] - journal_args.append("MODULE=%s" % os.path.basename(__file__)) + journal_args = [("MODULE", os.path.basename(__file__))] for arg in log_args: - journal_args.append(arg.upper() + "=" + str(log_args[arg])) + journal_args.append((arg.upper(), str(log_args[arg]))) try: - journal.sendv(*journal_args) + journal.send("%s %s" % (module, msg), **dict(journal_args)) except IOError, e: # fall back to syslog since logging to journal failed syslog.openlog(str(module), 0, syslog.LOG_USER) @@ -1207,9 +1260,24 @@ class AnsibleModule(object): return digest.hexdigest() def md5(self, filename): - ''' Return MD5 hex digest of local file using digest_from_file(). ''' + ''' Return MD5 hex digest of local file using digest_from_file(). + + Do not use this function unless you have no other choice for: + 1) Optional backwards compatibility + 2) Compatibility with a third party protocol + + This function will not work on systems complying with FIPS-140-2. + + Most uses of this function can use the module.sha1 function instead. + ''' + if not _md5: + raise ValueError('MD5 not available. Possibly running in FIPS mode') return self.digest_from_file(filename, _md5()) + def sha1(self, filename): + ''' Return SHA1 hex digest of local file using digest_from_file(). ''' + return self.digest_from_file(filename, _sha1()) + def sha256(self, filename): ''' Return SHA-256 hex digest of local file using digest_from_file(). ''' if not HAVE_HASHLIB: @@ -1320,7 +1388,7 @@ class AnsibleModule(object): # rename might not preserve context self.set_context_if_different(dest, context, False) - def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False): + def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None): ''' Execute a command, returns rc, stdout, and stderr. args is the command to run @@ -1328,12 +1396,17 @@ class AnsibleModule(object): If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False If args is a string and use_unsafe_shell=True it run with shell=True. Other arguments: - - check_rc (boolean) Whether to call fail_json in case of - non zero RC. Default is False. - - close_fds (boolean) See documentation for subprocess.Popen(). - Default is True. - - executable (string) See documentation for subprocess.Popen(). - Default is None. + - check_rc (boolean) Whether to call fail_json in case of + non zero RC. Default is False. + - close_fds (boolean) See documentation for subprocess.Popen(). + Default is True. + - executable (string) See documentation for subprocess.Popen(). + Default is None. 
+ - prompt_regex (string) A regex string (not a compiled regex) which + can be used to detect prompts in the stdout + which would otherwise cause the execution + to hang (especially if no input data is + specified) ''' shell = False @@ -1349,6 +1422,13 @@ class AnsibleModule(object): msg = "Argument 'args' to run_command must be list or string" self.fail_json(rc=257, cmd=args, msg=msg) + prompt_re = None + if prompt_regex: + try: + prompt_re = re.compile(prompt_regex, re.MULTILINE) + except re.error: + self.fail_json(msg="invalid prompt regular expression given to run_command") + # expand things like $HOME and ~ if not shell: args = [ os.path.expandvars(os.path.expanduser(x)) for x in args ] @@ -1365,27 +1445,27 @@ class AnsibleModule(object): # create a printable version of the command for use # in reporting later, which strips out things like # passwords from the args list - if isinstance(args, list): - clean_args = " ".join(pipes.quote(arg) for arg in args) + if isinstance(args, basestring): + to_clean_args = shlex.split(args.encode('utf-8')) else: - clean_args = args + to_clean_args = args - # all clean strings should return two match groups, - # where the first is the CLI argument and the second - # is the password/key/phrase that will be hidden - clean_re_strings = [ - # this removes things like --password, --pass, --pass-wd, etc. - # optionally followed by an '=' or a space. The password can - # be quoted or not too, though it does not care about quotes - # that are not balanced - # source: http://blog.stevenlevithan.com/archives/match-quoted-string - r'([-]{0,2}pass[-]?(?:word|wd)?[=\s]?)((?:["\'])?(?:[^\s])*(?:\1)?)', - r'^(?P.*:)(?P.*)(?P\@.*)$', - # TODO: add more regex checks here - ] - for re_str in clean_re_strings: - r = re.compile(re_str) - clean_args = r.sub(r'\1********', clean_args) + clean_args = [] + is_passwd = False + for arg in to_clean_args: + if is_passwd: + is_passwd = False + clean_args.append('********') + continue + if PASSWD_ARG_RE.match(arg): + sep_idx = arg.find('=') + if sep_idx > -1: + clean_args.append('%s=********' % arg[:sep_idx]) + continue + else: + is_passwd = True + clean_args.append(heuristic_log_sanitize(arg)) + clean_args = ' '.join(pipes.quote(arg) for arg in clean_args) if data: st_in = subprocess.PIPE @@ -1442,6 +1522,10 @@ class AnsibleModule(object): stderr += dat if dat == '': rpipes.remove(cmd.stderr) + # if we're checking for prompts, do it now + if prompt_re: + if prompt_re.search(stdout) and not data: + return (257, stdout, "A prompt was encountered while running a command, but no input data was specified") # only break out if no pipes are left to read or # the pipes are completely read and # the process is terminated @@ -1466,7 +1550,7 @@ class AnsibleModule(object): self.fail_json(rc=257, msg=traceback.format_exc(), cmd=clean_args) if rc != 0 and check_rc: - msg = stderr.rstrip() + msg = heuristic_log_sanitize(stderr.rstrip()) self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg) # reset the pwd diff --git a/lib/ansible/module_utils/database.py b/lib/ansible/module_utils/database.py new file mode 100644 index 00000000000..0dd1990d3e7 --- /dev/null +++ b/lib/ansible/module_utils/database.py @@ -0,0 +1,128 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. 
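The new prompt_regex parameter protects run_command callers from commands that stop to ask for input. A hypothetical module-body sketch (the command and pattern are illustrative, not from the patch):

```python
from ansible.module_utils.basic import *   # module boilerplate of this era

def main():
    module = AnsibleModule(argument_spec=dict())
    # prompt_regex is a plain pattern string, compiled internally with
    # re.MULTILINE; if it matches stdout and no input `data` was supplied,
    # run_command returns rc 257 plus a message instead of hanging.
    rc, out, err = module.run_command(['svn', 'info', 'https://example.com/repo'],
                                      prompt_regex=r'[Pp]assword( for [^:]+)?:')
    if rc == 257:
        module.fail_json(msg='command stopped at a prompt: %s' % err)
    module.exit_json(changed=False, stdout=out)

main()
```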
+# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2014, Toshio Kuratomi +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +class SQLParseError(Exception): + pass + +class UnclosedQuoteError(SQLParseError): + pass + +# maps a type of identifier to the maximum number of dot levels that are +# allowed to specify that identifier. For example, a database column can be +# specified by up to 4 levels: database.schema.table.column +_PG_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, schema=2, table=3, column=4, role=1) +_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1) + +def _find_end_quote(identifier, quote_char): + accumulate = 0 + while True: + try: + quote = identifier.index(quote_char) + except ValueError: + raise UnclosedQuoteError + accumulate = accumulate + quote + try: + next_char = identifier[quote+1] + except IndexError: + return accumulate + if next_char == quote_char: + try: + identifier = identifier[quote+2:] + accumulate = accumulate + 2 + except IndexError: + raise UnclosedQuoteError + else: + return accumulate + + +def _identifier_parse(identifier, quote_char): + if not identifier: + raise SQLParseError('Identifier name unspecified or unquoted trailing dot') + + already_quoted = False + if identifier.startswith(quote_char): + already_quoted = True + try: + end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1 + except UnclosedQuoteError: + already_quoted = False + else: + if end_quote < len(identifier) - 1: + if identifier[end_quote+1] == '.': + dot = end_quote + 1 + first_identifier = identifier[:dot] + next_identifier = identifier[dot+1:] + further_identifiers = _identifier_parse(next_identifier, quote_char) + further_identifiers.insert(0, first_identifier) + else: + raise SQLParseError('User escaped identifiers must escape extra quotes') + else: + further_identifiers = [identifier] + + if not already_quoted: + try: + dot = identifier.index('.') + except ValueError: + identifier = identifier.replace(quote_char, quote_char*2) + identifier = ''.join((quote_char, identifier, quote_char)) + further_identifiers = [identifier] + else:
+ if dot == 0 or dot >= len(identifier) - 1: + identifier = identifier.replace(quote_char, quote_char*2) + identifier = ''.join((quote_char, identifier, quote_char)) + further_identifiers = [identifier] + else: + first_identifier = identifier[:dot] + next_identifier = identifier[dot+1:] + further_identifiers = _identifier_parse(next_identifier, quote_char) + first_identifier = first_identifier.replace(quote_char, quote_char*2) + first_identifier = ''.join((quote_char, first_identifier, quote_char)) + further_identifiers.insert(0, first_identifier) + + return further_identifiers + + +def pg_quote_identifier(identifier, id_type): + identifier_fragments = _identifier_parse(identifier, quote_char='"') + if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]: + raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type])) + return '.'.join(identifier_fragments) + +def mysql_quote_identifier(identifier, id_type): + identifier_fragments = _identifier_parse(identifier, quote_char='`') + if len(identifier_fragments) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]: + raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type])) + + special_cased_fragments = [] + for fragment in identifier_fragments: + if fragment == '`*`': + special_cased_fragments.append('*') + else: + special_cased_fragments.append(fragment) + + return '.'.join(special_cased_fragments) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index 6400e74f373..0f08fead180 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -36,7 +36,9 @@ AWS_REGIONS = [ 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', + 'cn-north-1', + 'eu-central-1', 'eu-west-1', 'sa-east-1', 'us-east-1', 'us-west-1', @@ -54,7 +57,6 @@ def aws_common_argument_spec(): security_token=dict(no_log=True), profile=dict(), ) - return spec def ec2_argument_spec(): @@ -164,6 +166,11 @@ def boto_fix_security_token_in_profile(conn, profile_name): def connect_to_aws(aws_module, region, **params): conn = aws_module.connect_to_region(region, **params) + if not conn: + if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]: + raise StandardError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto" % (region, aws_module.__name__)) + else: + raise StandardError("Unknown problem connecting to region %s for aws module %s."
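The two public entry points of the new database snippet split a dotted identifier, quote each fragment, and double any embedded quote characters. A usage sketch, assuming the snippet is importable as ansible.module_utils.database (Python 2 except syntax, as in the codebase):

```python
from ansible.module_utils.database import (pg_quote_identifier,
                                           mysql_quote_identifier,
                                           SQLParseError)

print(pg_quote_identifier('public.users', 'table'))    # "public"."users"
print(mysql_quote_identifier('mydb.my`tbl', 'table'))  # `mydb`.`my``tbl`

# Exceeding the dot depth allowed for the identifier type is rejected:
try:
    pg_quote_identifier('db.schema.table.column.extra', 'column')
except SQLParseError, e:
    print(e)   # PostgreSQL does not support column with more than 4 dots
```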
% (region, aws_module.__name__)) if params.get('profile_name'): conn = boto_fix_security_token_in_profile(conn, params['profile_name']) return conn @@ -179,13 +186,13 @@ def ec2_connect(module): if region: try: ec2 = connect_to_aws(boto.ec2, region, **boto_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) # Otherwise, no region so we fallback to the old connection method elif ec2_url: try: ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="Either region or ec2_url must be specified") diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index f9d2fdbf336..371c62aa919 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -29,6 +29,7 @@ import socket import struct import datetime import getpass +import pwd import ConfigParser import StringIO @@ -46,7 +47,7 @@ except ImportError: import simplejson as json # -------------------------------------------------------------- -# timeout function to make sure some fact gathering +# timeout function to make sure some fact gathering # steps do not exceed a time limit class TimeoutError(Exception): @@ -82,19 +83,22 @@ class Facts(object): subclass Facts. """ - _I386RE = re.compile(r'i[3456]86') + # i86pc is a Solaris and derivatives-ism + _I386RE = re.compile(r'i([3456]86|86pc)') # For the most part, we assume that platform.dist() will tell the truth. # This is the fallback to handle unknowns or exceptions - OSDIST_DICT = { '/etc/redhat-release': 'RedHat', - '/etc/vmware-release': 'VMwareESX', - '/etc/openwrt_release': 'OpenWrt', - '/etc/system-release': 'OtherLinux', - '/etc/alpine-release': 'Alpine', - '/etc/release': 'Solaris', - '/etc/arch-release': 'Archlinux', - '/etc/SuSE-release': 'SuSE', - '/etc/gentoo-release': 'Gentoo', - '/etc/os-release': 'Debian' } + OSDIST_LIST = ( ('/etc/redhat-release', 'RedHat'), + ('/etc/vmware-release', 'VMwareESX'), + ('/etc/openwrt_release', 'OpenWrt'), + ('/etc/system-release', 'OtherLinux'), + ('/etc/alpine-release', 'Alpine'), + ('/etc/release', 'Solaris'), + ('/etc/arch-release', 'Archlinux'), + ('/etc/SuSE-release', 'SuSE'), + ('/etc/os-release', 'SuSE'), + ('/etc/gentoo-release', 'Gentoo'), + ('/etc/os-release', 'Debian'), + ('/etc/lsb-release', 'Mandriva') ) SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' } # A list of dicts. 
If there is a platform with more than one @@ -116,19 +120,23 @@ class Facts(object): { 'path' : '/usr/bin/pkg', 'name' : 'pkg' }, ] - def __init__(self): + def __init__(self, load_on_init=True): + self.facts = {} - self.get_platform_facts() - self.get_distribution_facts() - self.get_cmdline() - self.get_public_ssh_host_keys() - self.get_selinux_facts() - self.get_pkg_mgr_facts() - self.get_lsb_facts() - self.get_date_time_facts() - self.get_user_facts() - self.get_local_facts() - self.get_env_facts() + + if load_on_init: + self.get_platform_facts() + self.get_distribution_facts() + self.get_cmdline() + self.get_public_ssh_host_keys() + self.get_selinux_facts() + self.get_fips_facts() + self.get_pkg_mgr_facts() + self.get_lsb_facts() + self.get_date_time_facts() + self.get_user_facts() + self.get_local_facts() + self.get_env_facts() def populate(self): return self.facts @@ -185,7 +193,7 @@ class Facts(object): # if that fails, skip it rc, out, err = module.run_command(fn) else: - out = open(fn).read() + out = get_file_content(fn, default='') # load raw json fact = 'loading %s' % fact_base @@ -230,6 +238,8 @@ class Facts(object): FreeBSD = 'FreeBSD', HPUX = 'HP-UX' ) + # TODO: Rewrite this to use the function references in a dict pattern + # as it's much cleaner than this massive if-else if self.facts['system'] == 'AIX': self.facts['distribution'] = 'AIX' rc, out, err = module.run_command("/usr/bin/oslevel") @@ -268,54 +278,116 @@ class Facts(object): self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA' self.facts['distribution_release'] = dist[2] or 'NA' # Try to handle the exceptions now ... - for (path, name) in Facts.OSDIST_DICT.items(): - if os.path.exists(path) and os.path.getsize(path) > 0: - if self.facts['distribution'] == 'Fedora': - pass - elif name == 'RedHat': - data = get_file_content(path) - if 'Red Hat' in data: + for (path, name) in Facts.OSDIST_LIST: + if os.path.exists(path): + if os.path.getsize(path) > 0: + if self.facts['distribution'] in ('Fedora', ): + # Once we determine the value is one of these distros + # we trust the values are always correct + break + elif name == 'RedHat': + data = get_file_content(path) + if 'Red Hat' in data: + self.facts['distribution'] = name + else: + self.facts['distribution'] = data.split()[0] + break + elif name == 'OtherLinux': + data = get_file_content(path) + if 'Amazon' in data: + self.facts['distribution'] = 'Amazon' + self.facts['distribution_version'] = data.split()[-1] + break + elif name == 'OpenWrt': + data = get_file_content(path) + if 'OpenWrt' in data: + self.facts['distribution'] = name + version = re.search('DISTRIB_RELEASE="(.*)"', data) + if version: + self.facts['distribution_version'] = version.groups()[0] + release = re.search('DISTRIB_CODENAME="(.*)"', data) + if release: + self.facts['distribution_release'] = release.groups()[0] + break + elif name == 'Alpine': + data = get_file_content(path) self.facts['distribution'] = name - else: - self.facts['distribution'] = data.split()[0] - elif name == 'OtherLinux': - data = get_file_content(path) - if 'Amazon' in data: - self.facts['distribution'] = 'Amazon' - self.facts['distribution_version'] = data.split()[-1] - elif name == 'OpenWrt': - data = get_file_content(path) - if 'OpenWrt' in data: - self.facts['distribution'] = name - version = re.search('DISTRIB_RELEASE="(.*)"', data) - if version: - self.facts['distribution_version'] = version.groups()[0] - release = re.search('DISTRIB_CODENAME="(.*)"', data) - if release: - 
self.facts['distribution_release'] = release.groups()[0] - elif name == 'Alpine': - data = get_file_content(path) - self.facts['distribution'] = 'Alpine' - self.facts['distribution_version'] = data - elif name == 'Solaris': - data = get_file_content(path).split('\n')[0] - ora_prefix = '' - if 'Oracle Solaris' in data: - data = data.replace('Oracle ','') - ora_prefix = 'Oracle ' - self.facts['distribution'] = data.split()[0] - self.facts['distribution_version'] = data.split()[1] - self.facts['distribution_release'] = ora_prefix + data - elif name == 'SuSE': - data = get_file_content(path).splitlines() - for line in data: - if '=' in line: - self.facts['distribution_release'] = line.split('=')[1].strip() - elif name == 'Debian': - data = get_file_content(path).split('\n')[0] - release = re.search("PRETTY_NAME.+ \(?([^ ]+?)\)?\"", data) - if release: - self.facts['distribution_release'] = release.groups()[0] + self.facts['distribution_version'] = data + break + elif name == 'Solaris': + data = get_file_content(path).split('\n')[0] + if 'Solaris' in data: + ora_prefix = '' + if 'Oracle Solaris' in data: + data = data.replace('Oracle ','') + ora_prefix = 'Oracle ' + self.facts['distribution'] = data.split()[0] + self.facts['distribution_version'] = data.split()[1] + self.facts['distribution_release'] = ora_prefix + data + break + + uname_rc, uname_out, uname_err = module.run_command(['uname', '-v']) + distribution_version = None + if 'SmartOS' in data: + self.facts['distribution'] = 'SmartOS' + if os.path.exists('/etc/product'): + product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').split('\n') if ': ' in l]) + if 'Image' in product_data: + distribution_version = product_data.get('Image').split()[-1] + elif 'OpenIndiana' in data: + self.facts['distribution'] = 'OpenIndiana' + elif 'OmniOS' in data: + self.facts['distribution'] = 'OmniOS' + distribution_version = data.split()[-1] + elif uname_rc == 0 and 'NexentaOS_' in uname_out: + self.facts['distribution'] = 'Nexenta' + distribution_version = data.split()[-1].lstrip('v') + + if self.facts['distribution'] in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'): + self.facts['distribution_release'] = data.strip() + if distribution_version is not None: + self.facts['distribution_version'] = distribution_version + elif uname_rc == 0: + self.facts['distribution_version'] = uname_out.split('\n')[0].strip() + break + + elif name == 'SuSE': + data = get_file_content(path) + if 'suse' in data.lower(): + if path == '/etc/os-release': + release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) + distdata = get_file_content(path).split('\n')[0] + self.facts['distribution'] = distdata.split('=')[1] + if release: + self.facts['distribution_release'] = release.groups()[0] + break + elif path == '/etc/SuSE-release': + data = data.splitlines() + distdata = get_file_content(path).split('\n')[0] + self.facts['distribution'] = distdata.split()[0] + for line in data: + release = re.search('CODENAME *= *([^\n]+)', line) + if release: + self.facts['distribution_release'] = release.groups()[0].strip() + break + elif name == 'Debian': + data = get_file_content(path) + if 'Debian' in data: + release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) + if release: + self.facts['distribution_release'] = release.groups()[0] + break + elif name == 'Mandriva': + data = get_file_content(path) + if 'Mandriva' in data: + version = re.search('DISTRIB_RELEASE="(.*)"', data) + if version: + self.facts['distribution_version'] = version.groups()[0] + 
release = re.search('DISTRIB_CODENAME="(.*)"', data) + if release: + self.facts['distribution_release'] = release.groups()[0] + self.facts['distribution'] = name + break else: self.facts['distribution'] = name @@ -394,20 +466,16 @@ class Facts(object): self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0] elif lsb_path is None and os.path.exists('/etc/lsb-release'): self.facts['lsb'] = {} - f = open('/etc/lsb-release', 'r') - try: - for line in f.readlines(): - value = line.split('=',1)[1].strip() - if 'DISTRIB_ID' in line: - self.facts['lsb']['id'] = value - elif 'DISTRIB_RELEASE' in line: - self.facts['lsb']['release'] = value - elif 'DISTRIB_DESCRIPTION' in line: - self.facts['lsb']['description'] = value - elif 'DISTRIB_CODENAME' in line: - self.facts['lsb']['codename'] = value - finally: - f.close() + for line in get_file_lines('/etc/lsb-release'): + value = line.split('=',1)[1].strip() + if 'DISTRIB_ID' in line: + self.facts['lsb']['id'] = value + elif 'DISTRIB_RELEASE' in line: + self.facts['lsb']['release'] = value + elif 'DISTRIB_DESCRIPTION' in line: + self.facts['lsb']['description'] = value + elif 'DISTRIB_CODENAME' in line: + self.facts['lsb']['codename'] = value else: return self.facts @@ -451,6 +519,13 @@ class Facts(object): self.facts['selinux']['type'] = 'unknown' + def get_fips_facts(self): + self.facts['fips'] = False + data = get_file_content('/proc/sys/crypto/fips_enabled') + if data and data == '1': + self.facts['fips'] = True + + def get_date_time_facts(self): self.facts['date_time'] = {} @@ -476,6 +551,12 @@ class Facts(object): # User def get_user_facts(self): self.facts['user_id'] = getpass.getuser() + pwent = pwd.getpwnam(getpass.getuser()) + self.facts['user_uid'] = pwent.pw_uid + self.facts['user_gid'] = pwent.pw_gid + self.facts['user_gecos'] = pwent.pw_gecos + self.facts['user_dir'] = pwent.pw_dir + self.facts['user_shell'] = pwent.pw_shell def get_env_facts(self): self.facts['env'] = {} @@ -527,7 +608,11 @@ class LinuxHardware(Hardware): """ platform = 'Linux' - MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'] + + # Originally only had these four as toplevelfacts + ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree')) + # Now we have all of these in a dict structure + MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached')) def __init__(self): Hardware.__init__(self) @@ -546,31 +631,95 @@ class LinuxHardware(Hardware): def get_memory_facts(self): if not os.access("/proc/meminfo", os.R_OK): return - for line in open("/proc/meminfo").readlines(): + + memstats = {} + for line in get_file_lines("/proc/meminfo"): data = line.split(":", 1) key = data[0] - if key in LinuxHardware.MEMORY_FACTS: + if key in self.ORIGINAL_MEMORY_FACTS: val = data[1].strip().split(' ')[0] self.facts["%s_mb" % key.lower()] = long(val) / 1024 + if key in self.MEMORY_FACTS: + val = data[1].strip().split(' ')[0] + memstats[key.lower()] = long(val) / 1024 + + if None not in (memstats.get('memtotal'), memstats.get('memfree')): + memstats['real:used'] = memstats['memtotal'] - memstats['memfree'] + if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')): + memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers'] + if None not in (memstats.get('memtotal'), memstats.get('nocache:free')): + memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free'] + if None not in (memstats.get('swaptotal'), memstats.get('swapfree')): + 
memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree'] + + self.facts['memory_mb'] = { + 'real' : { + 'total': memstats.get('memtotal'), + 'used': memstats.get('real:used'), + 'free': memstats.get('memfree'), + }, + 'nocache' : { + 'free': memstats.get('nocache:free'), + 'used': memstats.get('nocache:used'), + }, + 'swap' : { + 'total': memstats.get('swaptotal'), + 'free': memstats.get('swapfree'), + 'used': memstats.get('swap:used'), + 'cached': memstats.get('swapcached'), + }, + } + def get_cpu_facts(self): i = 0 + vendor_id_occurrence = 0 + model_name_occurrence = 0 physid = 0 coreid = 0 sockets = {} cores = {} + + xen = False + xen_paravirt = False + try: + if os.path.exists('/proc/xen'): + xen = True + else: + for line in get_file_lines('/sys/hypervisor/type'): + if line.strip() == 'xen': + xen = True + # Only interested in the first line + break + except IOError: + pass + if not os.access("/proc/cpuinfo", os.R_OK): return self.facts['processor'] = [] - for line in open("/proc/cpuinfo").readlines(): + for line in get_file_lines('/proc/cpuinfo'): data = line.split(":", 1) key = data[0].strip() + + if xen: + if key == 'flags': + # Check for vme cpu flag, Xen paravirt does not expose this. + # Need to detect Xen paravirt because it exposes cpuinfo + # differently than Xen HVM or KVM and causes reporting of + # only a single cpu core. + if 'vme' not in data: + xen_paravirt = True + # model name is for Intel arch, Processor (mind the uppercase P) # works for some ARM devices, like the Sheevaplug. if key == 'model name' or key == 'Processor' or key == 'vendor_id': if 'processor' not in self.facts: self.facts['processor'] = [] self.facts['processor'].append(data[1].strip()) + if key == 'vendor_id': + vendor_id_occurrence += 1 + if key == 'model name': + model_name_occurrence += 1 i += 1 elif key == 'physical id': physid = data[1].strip() @@ -586,13 +735,23 @@ class LinuxHardware(Hardware): cores[coreid] = int(data[1].strip()) elif key == '# processors': self.facts['processor_cores'] = int(data[1].strip()) + + if vendor_id_occurrence == model_name_occurrence: + i = vendor_id_occurrence + if self.facts['architecture'] != 's390x': - self.facts['processor_count'] = sockets and len(sockets) or i - self.facts['processor_cores'] = sockets.values() and sockets.values()[0] or 1 - self.facts['processor_threads_per_core'] = ((cores.values() and - cores.values()[0] or 1) / self.facts['processor_cores']) - self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] * - self.facts['processor_count'] * self.facts['processor_cores']) + if xen_paravirt: + self.facts['processor_count'] = i + self.facts['processor_cores'] = i + self.facts['processor_threads_per_core'] = 1 + self.facts['processor_vcpus'] = i + else: + self.facts['processor_count'] = sockets and len(sockets) or i + self.facts['processor_cores'] = sockets.values() and sockets.values()[0] or 1 + self.facts['processor_threads_per_core'] = ((cores.values() and + cores.values()[0] or 1) / self.facts['processor_cores']) + self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] * + self.facts['processor_count'] * self.facts['processor_cores']) def get_dmi_facts(self): ''' learn dmi facts from system @@ -683,6 +842,13 @@ class LinuxHardware(Hardware): size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail) except OSError, e: continue + lsblkPath = module.get_bin_path("lsblk") + rc, out, err = module.run_command("%s -ln --output UUID %s" % (lsblkPath, fields[0]), use_unsafe_shell=True) + + if rc 
== 0: + uuid = out.strip() + else: + uuid = 'NA' self.facts['mounts'].append( {'mount': fields[1], @@ -692,6 +858,7 @@ class LinuxHardware(Hardware): # statvfs data 'size_total': size_total, 'size_available': size_available, + 'uuid': uuid, }) def get_device_facts(self): @@ -1108,7 +1275,7 @@ class NetBSDHardware(Hardware): if not os.access("/proc/cpuinfo", os.R_OK): return self.facts['processor'] = [] - for line in open("/proc/cpuinfo").readlines(): + for line in get_file_lines("/proc/cpuinfo"): data = line.split(":", 1) key = data[0].strip() # model name is for Intel arch, Processor (mind the uppercase P) @@ -1134,7 +1301,7 @@ class NetBSDHardware(Hardware): def get_memory_facts(self): if not os.access("/proc/meminfo", os.R_OK): return - for line in open("/proc/meminfo").readlines(): + for line in get_file_lines("/proc/meminfo"): data = line.split(":", 1) key = data[0] if key in NetBSDHardware.MEMORY_FACTS: @@ -1312,7 +1479,7 @@ class HPUX(Hardware): self.facts['memtotal_mb'] = int(data) / 1024 except AttributeError: #For systems where memory details aren't sent to syslog or the log has rotated, use parsed - #adb output. Unfortunatley /dev/kmem doesn't have world-read, so this only works as root. + #adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root. if os.access("/dev/kmem", os.R_OK): rc, out, err = module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True) if not err: @@ -1516,44 +1683,44 @@ class LinuxNetwork(Network): device = os.path.basename(path) interfaces[device] = { 'device': device } if os.path.exists(os.path.join(path, 'address')): - macaddress = open(os.path.join(path, 'address')).read().strip() + macaddress = get_file_content(os.path.join(path, 'address'), default='') if macaddress and macaddress != '00:00:00:00:00:00': interfaces[device]['macaddress'] = macaddress if os.path.exists(os.path.join(path, 'mtu')): - interfaces[device]['mtu'] = int(open(os.path.join(path, 'mtu')).read().strip()) + interfaces[device]['mtu'] = int(get_file_content(os.path.join(path, 'mtu'))) if os.path.exists(os.path.join(path, 'operstate')): - interfaces[device]['active'] = open(os.path.join(path, 'operstate')).read().strip() != 'down' + interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down' # if os.path.exists(os.path.join(path, 'carrier')): -# interfaces[device]['link'] = open(os.path.join(path, 'carrier')).read().strip() == '1' +# interfaces[device]['link'] = get_file_content(os.path.join(path, 'carrier')) == '1' if os.path.exists(os.path.join(path, 'device','driver', 'module')): interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module'))) if os.path.exists(os.path.join(path, 'type')): - type = open(os.path.join(path, 'type')).read().strip() - if type == '1': + _type = get_file_content(os.path.join(path, 'type')) + if _type == '1': interfaces[device]['type'] = 'ether' - elif type == '512': + elif _type == '512': interfaces[device]['type'] = 'ppp' - elif type == '772': + elif _type == '772': interfaces[device]['type'] = 'loopback' if os.path.exists(os.path.join(path, 'bridge')): interfaces[device]['type'] = 'bridge' interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ] if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')): - interfaces[device]['id'] = open(os.path.join(path, 'bridge', 'bridge_id')).read().strip() + 
interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='') if os.path.exists(os.path.join(path, 'bridge', 'stp_state')): - interfaces[device]['stp'] = open(os.path.join(path, 'bridge', 'stp_state')).read().strip() == '1' + interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1' if os.path.exists(os.path.join(path, 'bonding')): interfaces[device]['type'] = 'bonding' - interfaces[device]['slaves'] = open(os.path.join(path, 'bonding', 'slaves')).read().split() - interfaces[device]['mode'] = open(os.path.join(path, 'bonding', 'mode')).read().split()[0] - interfaces[device]['miimon'] = open(os.path.join(path, 'bonding', 'miimon')).read().split()[0] - interfaces[device]['lacp_rate'] = open(os.path.join(path, 'bonding', 'lacp_rate')).read().split()[0] - primary = open(os.path.join(path, 'bonding', 'primary')).read() + interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split() + interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0] + interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0] + interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0] + primary = get_file_content(os.path.join(path, 'bonding', 'primary')) if primary: interfaces[device]['primary'] = primary path = os.path.join(path, 'bonding', 'all_slaves_active') if os.path.exists(path): - interfaces[device]['all_slaves_active'] = open(path).read() == '1' + interfaces[device]['all_slaves_active'] = get_file_content(path) == '1' # Check whether an interface is in promiscuous mode if os.path.exists(os.path.join(path,'flags')): @@ -1561,7 +1728,7 @@ class LinuxNetwork(Network): # The second byte indicates whether the interface is in promiscuous mode. 
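The bare open(...).read() calls being replaced throughout these network facts leaked a file handle per read; get_file_content (reworked near the end of this patch) closes the handle and hands back a default for missing, unreadable, or empty files. The calling pattern, as a sketch on an assumed Linux host:

```python
from ansible.module_utils.facts import get_file_content, get_file_lines

# Missing, unreadable, or empty file -> the supplied default.
slaves = get_file_content('/sys/class/net/bond0/bonding/slaves', default='')
print(slaves.split())   # [] when there is no bond0

# get_file_lines is readlines() with a guaranteed close(); unlike
# get_file_content it raises IOError when the path does not exist.
for line in get_file_lines('/proc/meminfo'):
    if line.startswith('MemTotal'):
        print(line.strip())
```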
# 1 = promisc # 0 = no promisc - data = int(open(os.path.join(path, 'flags')).read().strip(),16) + data = int(get_file_content(os.path.join(path, 'flags')),16) promisc_mode = (data & 0x0100 > 0) interfaces[device]['promisc'] = promisc_mode @@ -2107,7 +2274,7 @@ class LinuxVirtual(Virtual): self.facts['virtualization_type'] = 'xen' self.facts['virtualization_role'] = 'guest' try: - for line in open('/proc/xen/capabilities'): + for line in get_file_lines('/proc/xen/capabilities'): if "control_d" in line: self.facts['virtualization_role'] = 'host' except IOError: @@ -2123,7 +2290,11 @@ class LinuxVirtual(Virtual): return if os.path.exists('/proc/1/cgroup'): - for line in open('/proc/1/cgroup').readlines(): + for line in get_file_lines('/proc/1/cgroup'): + if re.search('/docker/', line): + self.facts['virtualization_type'] = 'docker' + self.facts['virtualization_role'] = 'guest' + return if re.search('/lxc/', line): self.facts['virtualization_type'] = 'lxc' self.facts['virtualization_role'] = 'guest' @@ -2171,8 +2342,13 @@ class LinuxVirtual(Virtual): self.facts['virtualization_role'] = 'guest' return + if sys_vendor == 'QEMU': + self.facts['virtualization_type'] = 'kvm' + self.facts['virtualization_role'] = 'guest' + return + if os.path.exists('/proc/self/status'): - for line in open('/proc/self/status').readlines(): + for line in get_file_lines('/proc/self/status'): if re.match('^VxID: \d+', line): self.facts['virtualization_type'] = 'linux_vserver' if re.match('^VxID: 0', line): @@ -2182,7 +2358,7 @@ class LinuxVirtual(Virtual): return if os.path.exists('/proc/cpuinfo'): - for line in open('/proc/cpuinfo').readlines(): + for line in get_file_lines('/proc/cpuinfo'): if re.match('^model name.*QEMU Virtual CPU', line): self.facts['virtualization_type'] = 'kvm' elif re.match('^vendor_id.*User Mode Linux', line): @@ -2215,7 +2391,7 @@ class LinuxVirtual(Virtual): # Beware that we can have both kvm and virtualbox running on a single system if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK): modules = [] - for line in open("/proc/modules").readlines(): + for line in get_file_lines("/proc/modules"): data = line.split(" ", 1) modules.append(data[0]) @@ -2326,14 +2502,28 @@ class SunOSVirtual(Virtual): self.facts['virtualization_type'] = 'virtualbox' self.facts['virtualization_role'] = 'guest' -def get_file_content(path, default=None): +def get_file_content(path, default=None, strip=True): data = default if os.path.exists(path) and os.access(path, os.R_OK): - data = open(path).read().strip() - if len(data) == 0: - data = default + try: + datafile = open(path) + data = datafile.read() + if strip: + data = data.strip() + if len(data) == 0: + data = default + finally: + datafile.close() return data +def get_file_lines(path): + '''file.readlines() that closes the file''' + datafile = open(path) + try: + return datafile.readlines() + finally: + datafile.close() + def ansible_facts(module): facts = {} facts.update(Facts().populate()) diff --git a/lib/ansible/module_utils/gce.py b/lib/ansible/module_utils/gce.py index 68aa66c41a9..37a4bf1deaf 100644 --- a/lib/ansible/module_utils/gce.py +++ b/lib/ansible/module_utils/gce.py @@ -32,7 +32,7 @@ import pprint USER_AGENT_PRODUCT="Ansible-gce" USER_AGENT_VERSION="v1" -def gce_connect(module): +def gce_connect(module, provider=None): """Return a Google Cloud Engine connection.""" service_account_email = module.params.get('service_account_email', None) pem_file = module.params.get('pem_file', None) @@ -71,8 +71,14 @@ def gce_connect(module): 
'secrets file.') return None + # Allow for passing in libcloud Google DNS (e.g, Provider.GOOGLE) + if provider is None: + provider = Provider.GCE + try: - gce = get_driver(Provider.GCE)(service_account_email, pem_file, datacenter=module.params.get('zone'), project=project_id) + gce = get_driver(provider)(service_account_email, pem_file, + datacenter=module.params.get('zone', None), + project=project_id) gce.connection.user_agent_append("%s/%s" % ( USER_AGENT_PRODUCT, USER_AGENT_VERSION)) except (RuntimeError, ValueError), e: diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 3406e2c7da0..99dbf2c03ad 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -40,7 +40,7 @@ def add_git_host_key(module, url, accept_hostkey=True, create_dir=True): """ idempotently add a git url hostkey """ - fqdn = get_fqdn(module.params['repo']) + fqdn = get_fqdn(url) if fqdn: known_host = check_hostkey(module, fqdn) @@ -72,12 +72,14 @@ def get_fqdn(repo_url): if 'ssh' not in parts[0] and 'git' not in parts[0]: # don't try and scan a hostname that's not ssh return None + # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so + # ensure we actually have a parts[1] before continuing. if parts[1] != '': result = parts[1] if ":" in result: result = result.split(":")[0] - if "@" in result: - result = result.split("@", 1)[1] + if "@" in result: + result = result.split("@", 1)[1] return result diff --git a/lib/ansible/module_utils/powershell.ps1 b/lib/ansible/module_utils/powershell.ps1 index c097c69768b..ee7d3ddeca4 100644 --- a/lib/ansible/module_utils/powershell.ps1 +++ b/lib/ansible/module_utils/powershell.ps1 @@ -142,3 +142,25 @@ Function ConvertTo-Bool return } +# Helper function to calculate a hash of a file in a way which powershell 3 +# and above can handle: +Function Get-FileChecksum($path) +{ + $hash = "" + If (Test-Path -PathType Leaf $path) + { + $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider; + $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); + [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); + $fp.Dispose(); + } + ElseIf (Test-Path -PathType Container $path) + { + $hash= "3"; + } + Else + { + $hash = "1"; + } + return $hash +} diff --git a/lib/ansible/module_utils/rax.py b/lib/ansible/module_utils/rax.py index a8f33208caf..75363b1aacb 100644 --- a/lib/ansible/module_utils/rax.py +++ b/lib/ansible/module_utils/rax.py @@ -173,9 +173,9 @@ def rax_find_server(module, rax_module, server): def rax_find_loadbalancer(module, rax_module, loadbalancer): clb = rax_module.cloud_loadbalancers try: - UUID(loadbalancer) found = clb.get(loadbalancer) except: + found = [] for lb in clb.list(): if loadbalancer == lb.name: found.append(lb) diff --git a/lib/ansible/module_utils/splitter.py b/lib/ansible/module_utils/splitter.py index 41b337773ff..899fa8cd925 100644 --- a/lib/ansible/module_utils/splitter.py +++ b/lib/ansible/module_utils/splitter.py @@ -76,7 +76,7 @@ def split_args(args): do_decode = True except UnicodeDecodeError: do_decode = False - items = args.strip().split('\n') + items = args.split('\n') # iterate over the tokens, and reassemble any that may have been # split on a space inside a jinja2 block. 
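The known_hosts fix above matters because get_fqdn previously re-read module.params['repo'] instead of the url argument it was handed. What the helper extracts from common git URL shapes, per the parsing shown (expected results; hosts are illustrative):

```python
from ansible.module_utils.known_hosts import get_fqdn

print(get_fqdn('git@github.com:ansible/ansible.git'))    # github.com
print(get_fqdn('ssh://git@example.com:2222/repo.git'))   # example.com
print(get_fqdn('https://example.com/repo.git'))          # None: not ssh/git
```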
@@ -138,7 +138,10 @@ def split_args(args): spacer = ' ' params[-1] = "%s%s%s" % (params[-1], spacer, token) else: - params[-1] = "%s\n%s" % (params[-1], token) + spacer = '' + if not params[-1].endswith('\n') and idx == 0: + spacer = '\n' + params[-1] = "%s%s%s" % (params[-1], spacer, token) appended = True # if the number of paired block tags is not the same, the depth has changed, so we calculate that here @@ -170,7 +173,7 @@ def split_args(args): # one item (meaning we split on newlines), add a newline back here # to preserve the original structure if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation: - if not params[-1].endswith('\n'): + if not params[-1].endswith('\n') or item == '': params[-1] += '\n' # always clear the line continuation flag diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 1280745cc98..962b868ee0d 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -219,6 +219,8 @@ class SSLValidationHandler(urllib2.BaseHandler): # Write the dummy ca cert if we are running on Mac OS X if platform == 'Darwin': os.write(tmp_fd, DUMMY_CA_CERT) + # Default Homebrew path for OpenSSL certs + paths_checked.append('/usr/local/etc/openssl') # for all of the paths, find any .crt or .pem files # and compile them into single temp file for use @@ -250,9 +252,33 @@ class SSLValidationHandler(urllib2.BaseHandler): except: self.module.fail_json(msg='Connection to proxy failed') + def detect_no_proxy(self, url): + ''' + Detect if the 'no_proxy' environment variable is set and honor those locations. + ''' + env_no_proxy = os.environ.get('no_proxy') + if env_no_proxy: + env_no_proxy = env_no_proxy.split(',') + netloc = urlparse.urlparse(url).netloc + + for host in env_no_proxy: + if netloc.endswith(host) or netloc.split(':')[0].endswith(host): + # Our requested URL matches something in no_proxy, so don't + # use the proxy for this + return False + return True + def http_request(self, req): tmp_ca_cert_path, paths_checked = self.get_ca_certs() https_proxy = os.environ.get('https_proxy') + + # Detect if 'no_proxy' environment variable is set and if our URL is included + use_proxy = self.detect_no_proxy(req.get_full_url()) + + if not use_proxy: + # ignore proxy settings for this host request + return req + try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if https_proxy: diff --git a/lib/ansible/modules/__init__.py b/lib/ansible/modules/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core new file mode 160000 index 00000000000..095f8681dbd --- /dev/null +++ b/lib/ansible/modules/core @@ -0,0 +1 @@ +Subproject commit 095f8681dbdfd2e9247446822e953287c9bca66c diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras new file mode 160000 index 00000000000..d94d0ce70b5 --- /dev/null +++ b/lib/ansible/modules/extras @@ -0,0 +1 @@ +Subproject commit d94d0ce70b5db5ecfafbc73bebc822c9e18734f3 diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 4acb15a651f..7f4fd8cb1f3 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -21,6 +21,7 @@ import ansible.runner from ansible.utils.template import template from ansible import utils from ansible import errors +from ansible.module_utils.splitter import split_args, unquote import ansible.callbacks import ansible.cache import os @@ -209,12 +210,15 @@ class PlayBook(object): name and returns the merged vars along with 
the path ''' new_vars = existing_vars.copy() - tokens = shlex.split(play_ds.get('include', '')) + tokens = split_args(play_ds.get('include', '')) for t in tokens[1:]: - (k,v) = t.split("=", 1) - new_vars[k] = template(basedir, v, new_vars) + try: + (k,v) = unquote(t).split("=", 1) + new_vars[k] = template(basedir, v, new_vars) + except ValueError, e: + raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t) - return (new_vars, tokens[0]) + return (new_vars, unquote(tokens[0])) # ***************************************************** @@ -395,6 +399,10 @@ class PlayBook(object): remote_user=task.remote_user, remote_port=task.play.remote_port, module_vars=task.module_vars, + play_vars=task.play_vars, + play_file_vars=task.play_file_vars, + role_vars=task.role_vars, + role_params=task.role_params, default_vars=task.default_vars, extra_vars=self.extra_vars, private_key_file=self.private_key_file, @@ -496,7 +504,7 @@ class PlayBook(object): def _save_play_facts(host, facts): # saves play facts in SETUP_CACHE, unless the module executed was # set_fact, in which case we add them to the VARS_CACHE - if task.module_name == 'set_fact': + if task.module_name in ('set_fact', 'include_vars'): utils.update_hash(self.VARS_CACHE, host, facts) else: utils.update_hash(self.SETUP_CACHE, host, facts) @@ -601,6 +609,9 @@ class PlayBook(object): transport=play.transport, is_playbook=True, module_vars=play.vars, + play_vars=play.vars, + play_file_vars=play.vars_file_vars, + role_vars=play.role_vars, default_vars=play.default_vars, check=self.check, diff=self.diff, @@ -632,19 +643,28 @@ class PlayBook(object): buf = StringIO.StringIO() for x in replay_hosts: buf.write("%s\n" % x) - basedir = self.inventory.basedir() + basedir = C.shell_expand_path(C.RETRY_FILES_SAVE_PATH) filename = "%s.retry" % os.path.basename(self.filename) filename = filename.replace(".yml","") - filename = os.path.join(os.path.expandvars('$HOME/'), filename) + filename = os.path.join(basedir, filename) try: + if not os.path.exists(basedir): + os.makedirs(basedir) + fd = open(filename, 'w') fd.write(buf.getvalue()) fd.close() - return filename except: - pass - return None + ansible.callbacks.display( + "\nERROR: could not create retry file. 
Check the value of \n" + + "the configuration variable 'retry_files_save_path' or set \n" + + "'retry_files_enabled' to False to avoid this message.\n", + color='red' + ) + return None + + return filename # ***************************************************** diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 6f51ac832bb..74aa6a9f798 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -33,12 +33,12 @@ import uuid class Play(object): __slots__ = [ - 'hosts', 'name', 'vars', 'default_vars', 'vars_prompt', 'vars_files', + 'hosts', 'name', 'vars', 'vars_file_vars', 'role_vars', 'default_vars', 'vars_prompt', 'vars_files', 'handlers', 'remote_user', 'remote_port', 'included_roles', 'accelerate', 'accelerate_port', 'accelerate_ipv6', 'sudo', 'sudo_user', 'transport', 'playbook', 'tags', 'gather_facts', 'serial', '_ds', '_handlers', '_tasks', 'basedir', 'any_errors_fatal', 'roles', 'max_fail_pct', '_play_hosts', 'su', 'su_user', - 'vault_password', 'no_log', + 'vault_password', 'no_log', 'environment', ] # to catch typos and so forth -- these are userland names @@ -48,7 +48,7 @@ class Play(object): 'tasks', 'handlers', 'remote_user', 'user', 'port', 'include', 'accelerate', 'accelerate_port', 'accelerate_ipv6', 'sudo', 'sudo_user', 'connection', 'tags', 'gather_facts', 'serial', 'any_errors_fatal', 'roles', 'role_names', 'pre_tasks', 'post_tasks', 'max_fail_percentage', - 'su', 'su_user', 'vault_password', 'no_log', + 'su', 'su_user', 'vault_password', 'no_log', 'environment', ] # ************************************************* @@ -65,10 +65,13 @@ class Play(object): self.vars_prompt = ds.get('vars_prompt', {}) self.playbook = playbook self.vars = self._get_vars() + self.vars_file_vars = dict() # these are vars read in from vars_files: + self.role_vars = dict() # these are vars read in from vars/main.yml files in roles self.basedir = basedir self.roles = ds.get('roles', None) self.tags = ds.get('tags', None) self.vault_password = vault_password + self.environment = ds.get('environment', {}) if self.tags is None: self.tags = [] @@ -77,12 +80,14 @@ class Play(object): elif type(self.tags) != list: self.tags = [] - # make sure we have some special internal variables set - self.vars['playbook_dir'] = os.path.abspath(self.basedir) + # make sure we have some special internal variables set, which + # we use later when loading tasks and handlers + load_vars = dict() + load_vars['playbook_dir'] = os.path.abspath(self.basedir) if self.playbook.inventory.basedir() is not None: - self.vars['inventory_dir'] = self.playbook.inventory.basedir() + load_vars['inventory_dir'] = self.playbook.inventory.basedir() if self.playbook.inventory.src() is not None: - self.vars['inventory_file'] = self.playbook.inventory.src() + load_vars['inventory_file'] = self.playbook.inventory.src() # We first load the vars files from the datastructure # so we have the default variables to pass into the roles @@ -103,15 +108,17 @@ class Play(object): self._update_vars_files_for_host(None) - # apply any extra_vars specified on the command line now - if type(self.playbook.extra_vars) == dict: - self.vars = utils.combine_vars(self.vars, self.playbook.extra_vars) - # template everything to be efficient, but do not pre-mature template - # tasks/handlers as they may have inventory scope overrides + # tasks/handlers as they may have inventory scope overrides. 
We also + # create a set of temporary variables for templating, so we don't + # trample on the existing vars structures _tasks = ds.pop('tasks', []) _handlers = ds.pop('handlers', []) - ds = template(basedir, ds, self.vars) + + temp_vars = utils.merge_hash(self.vars, self.vars_file_vars) + temp_vars = utils.merge_hash(temp_vars, self.playbook.extra_vars) + + ds = template(basedir, ds, temp_vars) ds['tasks'] = _tasks ds['handlers'] = _handlers @@ -121,7 +128,11 @@ class Play(object): if hosts is None: raise errors.AnsibleError('hosts declaration is required') elif isinstance(hosts, list): - hosts = ';'.join(hosts) + try: + hosts = ';'.join(hosts) + except TypeError,e: + raise errors.AnsibleError('improper host declaration: %s' % str(e)) + self.serial = str(ds.get('serial', 0)) self.hosts = hosts self.name = ds.get('name', self.hosts) @@ -154,8 +165,7 @@ class Play(object): raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ' '("su", "su_user") cannot be used together') - load_vars = {} - load_vars['role_names'] = ds.get('role_names',[]) + load_vars['role_names'] = ds.get('role_names', []) self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars) self._handlers = self._load_tasks(self._ds.get('handlers', []), load_vars) @@ -218,7 +228,16 @@ class Play(object): raise errors.AnsibleError("too many levels of recursion while resolving role dependencies") for role in roles: role_path,role_vars = self._get_role_path(role) + + # save just the role params for this role, which exclude the special + # keywords 'role', 'tags', and 'when'. + role_params = role_vars.copy() + for item in ('role', 'tags', 'when'): + if item in role_params: + del role_params[item] + role_vars = utils.combine_vars(passed_vars, role_vars) + vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))) vars_data = {} if os.path.isfile(vars): @@ -227,10 +246,12 @@ class Play(object): if not isinstance(vars_data, dict): raise errors.AnsibleError("vars from '%s' are not a dict" % vars) role_vars = utils.combine_vars(vars_data, role_vars) + defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults'))) defaults_data = {} if os.path.isfile(defaults): defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password) + # the meta directory contains the yaml that should # hold the list of dependencies (if any) meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta'))) @@ -243,6 +264,13 @@ class Play(object): for dep in dependencies: allow_dupes = False (dep_path,dep_vars) = self._get_role_path(dep) + + # save the dep params, just as we did above + dep_params = dep_vars.copy() + for item in ('role', 'tags', 'when'): + if item in dep_params: + del dep_params[item] + meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta'))) if os.path.isfile(meta): meta_data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password) @@ -282,12 +310,15 @@ class Play(object): dep_vars = utils.combine_vars(passed_vars, dep_vars) dep_vars = utils.combine_vars(role_vars, dep_vars) + vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars'))) vars_data = {} if os.path.isfile(vars): vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password) if vars_data: - dep_vars = utils.combine_vars(vars_data, dep_vars) + dep_vars = utils.combine_vars(dep_vars, vars_data) + pass + defaults = 
self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults'))) dep_defaults_data = {} if os.path.isfile(defaults): @@ -323,15 +354,28 @@ class Play(object): dep_vars['when'] = tmpcond self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level+1) - dep_stack.append([dep,dep_path,dep_vars,dep_defaults_data]) + dep_stack.append([dep, dep_path, dep_vars, dep_params, dep_defaults_data]) # only add the current role when we're at the top level, # otherwise we'll end up in a recursive loop if level == 0: self.included_roles.append(role) - dep_stack.append([role,role_path,role_vars,defaults_data]) + dep_stack.append([role, role_path, role_vars, role_params, defaults_data]) return dep_stack + def _load_role_vars_files(self, vars_files): + # process variables stored in vars/main.yml files + role_vars = {} + for filename in vars_files: + if os.path.exists(filename): + new_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password) + if new_vars: + if type(new_vars) != dict: + raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_vars))) + role_vars = utils.combine_vars(role_vars, new_vars) + + return role_vars + def _load_role_defaults(self, defaults_files): # process default variables default_vars = {} @@ -358,10 +402,10 @@ class Play(object): if type(roles) != list: raise errors.AnsibleError("value of 'roles:' must be a list") - new_tasks = [] - new_handlers = [] - new_vars_files = [] - defaults_files = [] + new_tasks = [] + new_handlers = [] + role_vars_files = [] + defaults_files = [] pre_tasks = ds.get('pre_tasks', None) if type(pre_tasks) != list: @@ -372,18 +416,18 @@ class Play(object): # flush handlers after pre_tasks new_tasks.append(dict(meta='flush_handlers')) - roles = self._build_role_dependencies(roles, [], self.vars) + roles = self._build_role_dependencies(roles, [], {}) # give each role an uuid and # make role_path available as variable to the task for idx, val in enumerate(roles): this_uuid = str(uuid.uuid4()) - roles[idx][-2]['role_uuid'] = this_uuid - roles[idx][-2]['role_path'] = roles[idx][1] + roles[idx][-3]['role_uuid'] = this_uuid + roles[idx][-3]['role_path'] = roles[idx][1] role_names = [] - for (role,role_path,role_vars,default_vars) in roles: + for (role, role_path, role_vars, role_params, default_vars) in roles: # special vars must be extracted from the dict to the included tasks special_keys = [ "sudo", "sudo_user", "when", "with_items" ] special_vars = {} @@ -416,19 +460,19 @@ class Play(object): role_names.append(role_name) if os.path.isfile(task): - nt = dict(include=pipes.quote(task), vars=role_vars, default_vars=default_vars, role_name=role_name) + nt = dict(include=pipes.quote(task), vars=role_vars, role_params=role_params, default_vars=default_vars, role_name=role_name) for k in special_keys: if k in special_vars: nt[k] = special_vars[k] new_tasks.append(nt) if os.path.isfile(handler): - nt = dict(include=pipes.quote(handler), vars=role_vars, role_name=role_name) + nt = dict(include=pipes.quote(handler), vars=role_vars, role_params=role_params, role_name=role_name) for k in special_keys: if k in special_vars: nt[k] = special_vars[k] new_handlers.append(nt) if os.path.isfile(vars_file): - new_vars_files.append(vars_file) + role_vars_files.append(vars_file) if os.path.isfile(defaults_file): defaults_files.append(defaults_file) if os.path.isdir(library): @@ -456,13 +500,12 @@ class Play(object): new_tasks.append(dict(meta='flush_handlers')) 
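The role hunks above repeatedly fold one variable source into the next with utils.combine_vars, so later sources win on key conflicts. A minimal sketch of that layering (simplified and illustrative: the real combine_vars also honors the configured hash_behaviour setting, and the layer names below are examples rather than the exact precedence chain):

```python
# Illustrative only: a simplified stand-in for utils.combine_vars, which in
# the real code also respects the configured hash_behaviour setting.
def combine_vars(a, b):
    # later dict wins on key conflicts (shallow merge)
    result = a.copy()
    result.update(b)
    return result

def fold_layers(layers):
    # merge an ordered list of variable sources; the last layer has the
    # highest precedence, mirroring how these hunks build up 'inject'
    merged = {}
    for layer in layers:
        merged = combine_vars(merged, layer)
    return merged

# lowest precedence first; layer names are examples, not the full ordering
print(fold_layers([
    {'port': 80, 'name': 'default'},  # role defaults
    {'port': 8080},                   # role vars/main.yml
    {'name': 'from-extra-vars'},      # -e extra vars (highest)
]))
# -> {'port': 8080, 'name': 'from-extra-vars'}
```

Reordering two layers in the list flips which value survives, which is exactly the knob this refactoring is adjusting by splitting vars, vars_file_vars, role_vars and role_params apart.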
new_handlers.extend(handlers) - new_vars_files.extend(vars_files) ds['tasks'] = new_tasks ds['handlers'] = new_handlers - ds['vars_files'] = new_vars_files ds['role_names'] = role_names + self.role_vars = self._load_role_vars_files(role_vars_files) self.default_vars = self._load_role_defaults(defaults_files) return ds @@ -488,7 +531,7 @@ class Play(object): # ************************************************* - def _load_tasks(self, tasks, vars=None, default_vars=None, sudo_vars=None, + def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sudo_vars=None, additional_conditions=None, original_file=None, role_name=None): ''' handle task and handler include statements ''' @@ -500,6 +543,8 @@ class Play(object): additional_conditions = [] if vars is None: vars = {} + if role_params is None: + role_params = {} if default_vars is None: default_vars = {} if sudo_vars is None: @@ -529,8 +574,7 @@ class Play(object): results.append(Task(self, x)) continue - task_vars = self.vars.copy() - task_vars.update(vars) + task_vars = vars.copy() if original_file: task_vars['_original_file'] = original_file @@ -552,11 +596,15 @@ class Play(object): included_additional_conditions.append(x[k]) elif type(x[k]) is list: included_additional_conditions.extend(x[k]) - elif k in ("include", "vars", "default_vars", "sudo", "sudo_user", "role_name", "no_log"): + elif k in ("include", "vars", "role_params", "default_vars", "sudo", "sudo_user", "role_name", "no_log"): continue else: include_vars[k] = x[k] + # get any role parameters specified + role_params = x.get('role_params', {}) + + # get any role default variables specified default_vars = x.get('default_vars', {}) if not default_vars: default_vars = self.default_vars @@ -582,19 +630,29 @@ class Play(object): dirname = self.basedir if original_file: dirname = os.path.dirname(original_file) - include_file = template(dirname, tokens[0], mv) + + # temp vars are used here to avoid trampling on the existing vars structures + temp_vars = utils.merge_hash(self.vars, self.vars_file_vars) + temp_vars = utils.merge_hash(temp_vars, mv) + temp_vars = utils.merge_hash(temp_vars, self.playbook.extra_vars) + include_file = template(dirname, tokens[0], temp_vars) include_filename = utils.path_dwim(dirname, include_file) + data = utils.parse_yaml_from_file(include_filename, vault_password=self.vault_password) if 'role_name' in x and data is not None: for y in data: if isinstance(y, dict) and 'include' in y: y['role_name'] = new_role - loaded = self._load_tasks(data, mv, default_vars, included_sudo_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role) + loaded = self._load_tasks(data, mv, role_params, default_vars, included_sudo_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role) results += loaded elif type(x) == dict: task = Task( self, x, module_vars=task_vars, + play_vars=self.vars, + play_file_vars=self.vars_file_vars, + role_vars=self.role_vars, + role_params=role_params, default_vars=default_vars, additional_conditions=list(additional_conditions), role_name=role_name @@ -812,7 +870,7 @@ class Play(object): target_filename = filename4 update_vars_cache(host, data, target_filename=target_filename) else: - self.vars = utils.combine_vars(self.vars, data) + self.vars_file_vars = utils.combine_vars(self.vars_file_vars, data) # we did process this file return True # we did not process this file diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 
1570b88a4df..bdffba5527c 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -26,7 +26,7 @@ class Task(object): __slots__ = [ 'name', 'meta', 'action', 'when', 'async_seconds', 'async_poll_interval', - 'notify', 'module_name', 'module_args', 'module_vars', 'default_vars', + 'notify', 'module_name', 'module_args', 'module_vars', 'play_vars', 'play_file_vars', 'role_vars', 'role_params', 'default_vars', 'play', 'notified_by', 'tags', 'register', 'role_name', 'delegate_to', 'first_available_file', 'ignore_errors', 'local_action', 'transport', 'sudo', 'remote_user', 'sudo_user', 'sudo_pass', @@ -45,7 +45,7 @@ class Task(object): 'su', 'su_user', 'su_pass', 'no_log', 'run_once', ] - def __init__(self, play, ds, module_vars=None, default_vars=None, additional_conditions=None, role_name=None): + def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, role_params=None, default_vars=None, additional_conditions=None, role_name=None): ''' constructor loads from a task or handler datastructure ''' # meta directives are used to tell things like ansible/playbook to run @@ -84,9 +84,13 @@ class Task(object): # code to allow "with_glob" and to reference a lookup plugin named glob elif x.startswith("with_"): - - if isinstance(ds[x], basestring) and ds[x].lstrip().startswith("{{"): - utils.warning("It is unnecessary to use '{{' in loops, leave variables in loop expressions bare.") + if isinstance(ds[x], basestring): + param = ds[x].strip() + # Only a variable, no logic + if (param.startswith('{{') and + param.find('}}') == len(ds[x]) - 2 and + param.find('|') == -1): + utils.warning("It is unnecessary to use '{{' in loops, leave variables in loop expressions bare.") plugin_name = x.replace("with_","") if plugin_name in utils.plugins.lookup_loader: @@ -97,8 +101,13 @@ class Task(object): raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name)) elif x in [ 'changed_when', 'failed_when', 'when']: - if isinstance(ds[x], basestring) and ds[x].lstrip().startswith("{{"): - utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.") + if isinstance(ds[x], basestring): + param = ds[x].strip() + # Only a variable, no logic + if (param.startswith('{{') and + param.find('}}') == len(ds[x]) - 2 and + param.find('|') == -1): + utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.") elif x.startswith("when_"): utils.deprecated("The 'when_' conditional has been removed. 
Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True) @@ -110,9 +119,13 @@ elif not x in Task.VALID_KEYS: raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x) - self.module_vars = module_vars - self.default_vars = default_vars - self.play = play + self.module_vars = module_vars + self.play_vars = play_vars + self.play_file_vars = play_file_vars + self.role_vars = role_vars + self.role_params = role_params + self.default_vars = default_vars + self.play = play # load various attributes self.name = ds.get('name', None) @@ -120,7 +133,7 @@ self.register = ds.get('register', None) self.sudo = utils.boolean(ds.get('sudo', play.sudo)) self.su = utils.boolean(ds.get('su', play.su)) - self.environment = ds.get('environment', {}) + self.environment = ds.get('environment', play.environment) self.role_name = role_name self.no_log = utils.boolean(ds.get('no_log', "false")) or self.play.no_log self.run_once = utils.boolean(ds.get('run_once', 'false')) @@ -210,7 +223,11 @@ # combine the default and module vars here for use in templating all_vars = self.default_vars.copy() + all_vars = utils.combine_vars(all_vars, self.play_vars) + all_vars = utils.combine_vars(all_vars, self.play_file_vars) + all_vars = utils.combine_vars(all_vars, self.role_vars) all_vars = utils.combine_vars(all_vars, self.module_vars) + all_vars = utils.combine_vars(all_vars, self.role_params) self.async_seconds = ds.get('async', 0) # not async by default self.async_seconds = template.template_from_string(play.basedir, self.async_seconds, all_vars) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 54ee93a8d77..63e4ed27411 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -53,9 +53,9 @@ from ansible.utils import update_hash module_replacer = ModuleReplacer(strip_comments=False) try: - from hashlib import md5 as _md5 + from hashlib import sha1 except ImportError: - from md5 import md5 as _md5 + from sha import sha as sha1 HAS_ATFORK=True try: @@ -102,7 +102,7 @@ class HostVars(dict): if host not in self.lookup: result = self.inventory.get_variables(host, vault_password=self.vault_password).copy() result.update(self.vars_cache.get(host, {})) - self.lookup[host] = result + self.lookup[host] = template.template('.', result, self.vars_cache) return self.lookup[host] @@ -134,7 +134,11 @@ class Runner(object): sudo=False, # whether to run sudo or not sudo_user=C.DEFAULT_SUDO_USER, # ex: 'root' module_vars=None, # a playbooks internals thing - default_vars=None, # ditto + play_vars=None, # + play_file_vars=None, # + role_vars=None, # + role_params=None, # + default_vars=None, # extra_vars=None, # extra vars specified with the playbook(s) is_playbook=False, # running from playbook or not?
inventory=None, # reference to Inventory object @@ -154,6 +158,7 @@ run_hosts=None, # an optional list of pre-calculated hosts to run on no_log=False, # option to enable/disable logging for a given task run_once=False, # option to enable/disable host bypass loop for a given task + sudo_exe=C.DEFAULT_SUDO_EXE, # ex: /usr/local/bin/sudo ): # used to lock multiprocess inputs and outputs at various levels @@ -175,12 +180,17 @@ self.inventory = utils.default(inventory, lambda: ansible.inventory.Inventory(host_list)) self.module_vars = utils.default(module_vars, lambda: {}) + self.play_vars = utils.default(play_vars, lambda: {}) + self.play_file_vars = utils.default(play_file_vars, lambda: {}) + self.role_vars = utils.default(role_vars, lambda: {}) + self.role_params = utils.default(role_params, lambda: {}) self.default_vars = utils.default(default_vars, lambda: {}) self.extra_vars = utils.default(extra_vars, lambda: {}) self.always_run = None self.connector = connection.Connector(self) self.conditional = conditional + self.delegate_to = None self.module_name = module_name self.forks = int(forks) self.pattern = pattern @@ -207,20 +217,28 @@ self.su_user_var = su_user self.su_user = None self.su_pass = su_pass - self.omit_token = '__omit_place_holder__%s' % _md5(os.urandom(64)).hexdigest() + self.omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest() self.vault_pass = vault_pass self.no_log = no_log self.run_once = run_once + self.sudo_exe = sudo_exe if self.transport == 'smart': - # if the transport is 'smart' see if SSH can support ControlPersist if not use paramiko + # If the transport is 'smart', check to see if certain conditions + # would prevent us from using ssh, and fall back to paramiko.
# 'smart' is the default since 1.2.1/1.3 - cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (out, err) = cmd.communicate() - if "Bad configuration option" in err: + self.transport = "ssh" + if sys.platform.startswith('darwin') and self.remote_pass: + # due to a current bug in sshpass on OSX, which can trigger + # a kernel panic even for non-privileged users, we revert to + # paramiko on that OS when an SSH password is specified self.transport = "paramiko" else: - self.transport = "ssh" + # see if SSH can support ControlPersist if not use paramiko + cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (out, err) = cmd.communicate() + if "Bad configuration option" in err: + self.transport = "paramiko" # save the original transport, in case it gets # changed later via options like accelerate @@ -312,16 +330,13 @@ # ***************************************************** - def _compute_delegate(self, host, password, remote_inject): + def _compute_delegate(self, password, remote_inject): """ Build a dictionary of all attributes for the delegate host """ delegate = {} # allow delegated host to be templated - delegate['host'] = template.template(self.basedir, host, - remote_inject, fail_on_undefined=True) - delegate['inject'] = remote_inject.copy() # set any interpreters @@ -333,36 +348,33 @@ del delegate['inject'][i] port = C.DEFAULT_REMOTE_PORT - this_host = delegate['host'] - # get the vars for the delegate by its name try: - this_info = delegate['inject']['hostvars'][this_host] + this_info = delegate['inject']['hostvars'][self.delegate_to] except: # make sure the inject is empty for non-inventory hosts this_info = {} # get the real ssh_address for the delegate # and allow ansible_ssh_host to be templated - delegate['ssh_host'] = template.template(self.basedir, - this_info.get('ansible_ssh_host', this_host), - this_info, fail_on_undefined=True) + delegate['ssh_host'] = template.template( + self.basedir, + this_info.get('ansible_ssh_host', self.delegate_to), + this_info, + fail_on_undefined=True + ) delegate['port'] = this_info.get('ansible_ssh_port', port) - - delegate['user'] = self._compute_delegate_user(this_host, delegate['inject']) - + delegate['user'] = self._compute_delegate_user(self.delegate_to, delegate['inject']) delegate['pass'] = this_info.get('ansible_ssh_pass', password) - delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', - self.private_key_file) + delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', self.private_key_file) delegate['transport'] = this_info.get('ansible_connection', self.transport) delegate['sudo_pass'] = this_info.get('ansible_sudo_pass', self.sudo_pass) # Last chance to get private_key_file from global variables.
# this is useful if delegated host is not defined in the inventory if delegate['private_key_file'] is None: - delegate['private_key_file'] = remote_inject.get( - 'ansible_ssh_private_key_file', None) + delegate['private_key_file'] = remote_inject.get('ansible_ssh_private_key_file', None) if delegate['private_key_file'] is not None: delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file']) @@ -382,10 +394,20 @@ class Runner(object): actual_user = inject.get('ansible_ssh_user', self.remote_user) thisuser = None - if host in inject['hostvars']: - if inject['hostvars'][host].get('ansible_ssh_user'): - # user for delegate host in inventory - thisuser = inject['hostvars'][host].get('ansible_ssh_user') + try: + if host in inject['hostvars']: + if inject['hostvars'][host].get('ansible_ssh_user'): + # user for delegate host in inventory + thisuser = inject['hostvars'][host].get('ansible_ssh_user') + else: + # look up the variables for the host directly from inventory + host_vars = self.inventory.get_variables(host, vault_password=self.vault_pass) + if 'ansible_ssh_user' in host_vars: + thisuser = host_vars['ansible_ssh_user'] + except errors.AnsibleError, e: + # the hostname was not found in the inventory, so + # we just ignore this and try the next method + pass if thisuser is None and self.remote_user: # user defined by play/runner @@ -583,24 +605,14 @@ class Runner(object): # ***************************************************** - def _executor_internal(self, host, new_stdin): - ''' executes any module one or more times ''' - - host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass) - host_connection = host_variables.get('ansible_connection', self.transport) - if host_connection in [ 'paramiko', 'ssh', 'accelerate' ]: - port = host_variables.get('ansible_ssh_port', self.remote_port) - if port is None: - port = C.DEFAULT_REMOTE_PORT - else: - # fireball, local, etc - port = self.remote_port - + def get_combined_cache(self): # merge the VARS and SETUP caches for this host combined_cache = self.setup_cache.copy() - combined_cache = utils.merge_hash(combined_cache, self.vars_cache) + return utils.merge_hash(combined_cache, self.vars_cache) - hostvars = HostVars(combined_cache, self.inventory, vault_password=self.vault_pass) + def get_inject_vars(self, host): + host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass) + combined_cache = self.get_combined_cache() # use combined_cache and host_variables to template the module_vars # we update the inject variables with the data we're about to template @@ -609,28 +621,78 @@ class Runner(object): module_vars_inject = utils.combine_vars(self.module_vars, module_vars_inject) module_vars = template.template(self.basedir, self.module_vars, module_vars_inject) + # remove bad variables from the module vars, which may be in there due + # the way role declarations are specified in playbooks + if 'tags' in module_vars: + del module_vars['tags'] + if 'when' in module_vars: + del module_vars['when'] + + # start building the dictionary of injected variables inject = {} + # default vars are the lowest priority inject = utils.combine_vars(inject, self.default_vars) + # next come inventory variables for the host inject = utils.combine_vars(inject, host_variables) + # then the setup_cache which contains facts gathered inject = utils.combine_vars(inject, self.setup_cache.get(host, {})) + # next come variables from vars and vars files + inject = utils.combine_vars(inject, self.play_vars) + inject = 
utils.combine_vars(inject, self.play_file_vars) + # next come variables from role vars/main.yml files + inject = utils.combine_vars(inject, self.role_vars) + # then come the module variables inject = utils.combine_vars(inject, module_vars) + # followed by vars_cache things (set_fact, include_vars, and + # vars_files which had host-specific templating done) inject = utils.combine_vars(inject, self.vars_cache.get(host, {})) + # role parameters next + inject = utils.combine_vars(inject, self.role_params) + # and finally -e vars are the highest priority inject = utils.combine_vars(inject, self.extra_vars) + # and then special vars inject.setdefault('ansible_ssh_user', self.remote_user) - inject['hostvars'] = hostvars - inject['group_names'] = host_variables.get('group_names', []) - inject['groups'] = self.inventory.groups_list() - inject['vars'] = self.module_vars - inject['defaults'] = self.default_vars - inject['environment'] = self.environment + inject['group_names'] = host_variables.get('group_names', []) + inject['groups'] = self.inventory.groups_list() + inject['vars'] = self.module_vars + inject['defaults'] = self.default_vars + inject['environment'] = self.environment inject['playbook_dir'] = os.path.abspath(self.basedir) - inject['omit'] = self.omit_token + inject['omit'] = self.omit_token + inject['combined_cache'] = combined_cache - # template this one is available, callbacks use this - delegate_to = self.module_vars.get('delegate_to') - if delegate_to: - self.module_vars['delegate_to'] = template.template(self.basedir, delegate_to, inject) + return inject + + def _executor_internal(self, host, new_stdin): + ''' executes any module one or more times ''' + + # We build the proper injected dictionary for all future + # templating operations in this run + inject = self.get_inject_vars(host) + + # Then we selectively merge some variable dictionaries down to a + # single dictionary, used to template the HostVars for this host + temp_vars = self.inventory.get_variables(host, vault_password=self.vault_pass) + temp_vars = utils.merge_hash(temp_vars, inject['combined_cache']) + temp_vars = utils.merge_hash(temp_vars, self.play_vars) + temp_vars = utils.merge_hash(temp_vars, self.play_file_vars) + temp_vars = utils.merge_hash(temp_vars, self.extra_vars) + + hostvars = HostVars(temp_vars, self.inventory, vault_password=self.vault_pass) + + # and we save the HostVars in the injected dictionary so they + # may be referenced from playbooks/templates + inject['hostvars'] = hostvars + + host_connection = inject.get('ansible_connection', self.transport) + if host_connection in [ 'paramiko', 'ssh', 'accelerate' ]: + port = hostvars.get('ansible_ssh_port', self.remote_port) + if port is None: + port = C.DEFAULT_REMOTE_PORT + else: + # fireball, local, etc + port = self.remote_port if self.inventory.basedir() is not None: inject['inventory_dir'] = self.inventory.basedir() @@ -654,24 +716,46 @@ class Runner(object): if os.path.exists(filesdir): basedir = filesdir - items_terms = self.module_vars.get('items_lookup_terms', '') - items_terms = template.template(basedir, items_terms, inject) - items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject) + try: + items_terms = self.module_vars.get('items_lookup_terms', '') + items_terms = template.template(basedir, items_terms, inject) + items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject) + except errors.AnsibleUndefinedVariable, e: + if 
'has no attribute' in str(e): + # the undefined variable was an attribute of a variable that does + # exist, so try and run this through the conditional check to see + # if the user wanted to skip something on being undefined + if utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=True): + # the conditional check passed, so we have to fail here + raise + else: + # the conditional failed, so we skip this task + result = utils.jsonify(dict(changed=False, skipped=True)) + self.callbacks.on_skipped(host, None) + return ReturnData(host=host, result=result) + except errors.AnsibleError, e: + raise + except Exception, e: + raise errors.AnsibleError("Unexpected error while executing task: %s" % str(e)) + # strip out any jinja2 template syntax within # the data returned by the lookup plugin items = utils._clean_data_struct(items, from_remote=True) - if type(items) != list: - raise errors.AnsibleError("lookup plugins have to return a list: %r" % items) + if items is None: + items = [] + else: + if type(items) != list: + raise errors.AnsibleError("lookup plugins have to return a list: %r" % items) - if len(items) and utils.is_list_of_strings(items) and self.module_name in [ 'apt', 'yum', 'pkgng' ]: - # hack for apt, yum, and pkgng so that with_items maps back into a single module call - use_these_items = [] - for x in items: - inject['item'] = x - if not self.conditional or utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): - use_these_items.append(x) - inject['item'] = ",".join(use_these_items) - items = None + if len(items) and utils.is_list_of_strings(items) and self.module_name in [ 'apt', 'yum', 'pkgng', 'zypper' ]: + # hack for apt, yum, and pkgng so that with_items maps back into a single module call + use_these_items = [] + for x in items: + inject['item'] = x + if not self.conditional or utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): + use_these_items.append(x) + inject['item'] = ",".join(use_these_items) + items = None def _safe_template_complex_args(args, inject): # Ensure the complex args here are a dictionary, but @@ -733,6 +817,10 @@ class Runner(object): port, complex_args=complex_args ) + + if 'stdout' in result.result and 'stdout_lines' not in result.result: + result.result['stdout_lines'] = result.result['stdout'].splitlines() + results.append(result.result) if result.comm_ok == False: all_comm_ok = False @@ -805,6 +893,7 @@ class Runner(object): self.sudo_pass = inject.get('ansible_sudo_pass', self.sudo_pass) self.su = inject.get('ansible_su', self.su) self.su_pass = inject.get('ansible_su_pass', self.su_pass) + self.sudo_exe = inject.get('ansible_sudo_exe', self.sudo_exe) # select default root user in case self.sudo requested # but no user specified; happens e.g. 
in host vars when @@ -831,9 +920,12 @@ class Runner(object): # the delegated host may have different SSH port configured, etc # and we need to transfer those, and only those, variables - delegate_to = inject.get('delegate_to', None) - if delegate_to is not None: - delegate = self._compute_delegate(delegate_to, actual_pass, inject) + self.delegate_to = inject.get('delegate_to', None) + if self.delegate_to: + self.delegate_to = template.template(self.basedir, self.delegate_to, inject) + + if self.delegate_to is not None: + delegate = self._compute_delegate(actual_pass, inject) actual_transport = delegate['transport'] actual_host = delegate['ssh_host'] actual_port = delegate['port'] @@ -842,6 +934,8 @@ class Runner(object): actual_private_key_file = delegate['private_key_file'] self.sudo_pass = delegate['sudo_pass'] inject = delegate['inject'] + # set resolved delegate_to into inject so modules can call _remote_checksum + inject['delegate_to'] = self.delegate_to # user/pass may still contain variables at this stage actual_user = template.template(self.basedir, actual_user, inject) @@ -865,7 +959,7 @@ class Runner(object): try: conn = self.connector.connect(actual_host, actual_port, actual_user, actual_pass, actual_transport, actual_private_key_file) - if delegate_to or host != actual_host: + if self.delegate_to or host != actual_host: conn.delegate = host default_shell = getattr(conn, 'default_shell', '') @@ -898,7 +992,7 @@ class Runner(object): # render module_args and complex_args templates try: # When templating module_args, we need to be careful to ensure - # that no variables inadvertantly (or maliciously) add params + # that no variables inadvertently (or maliciously) add params # to the list of args. We do this by counting the number of k=v # pairs before and after templating. num_args_pre = self._count_module_args(module_args, allow_dupes=True) @@ -942,7 +1036,7 @@ class Runner(object): cond = template.template(self.basedir, until, inject, expand_lists=False) if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): - retries = self.module_vars.get('retries') + retries = template.template(self.basedir, self.module_vars.get('retries'), inject, expand_lists=False) delay = self.module_vars.get('delay') for x in range(1, int(retries) + 1): # template the delay, cast to float and sleep @@ -1108,26 +1202,77 @@ class Runner(object): # ***************************************************** - def _remote_md5(self, conn, tmp, path): - ''' takes a remote md5sum without requiring python, and returns 1 if no file ''' - cmd = conn.shell.md5(path) + def _remote_expand_user(self, conn, path, tmp): + ''' takes a remote path and performs tilde expansion on the remote host ''' + if not path.startswith('~'): + return path + + split_path = path.split(os.path.sep, 1) + expand_path = split_path[0] + if expand_path == '~': + if self.sudo and self.sudo_user: + expand_path = '~%s' % self.sudo_user + elif self.su and self.su_user: + expand_path = '~%s' % self.su_user + + cmd = conn.shell.expand_user(expand_path) + data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False, su=False) + initial_fragment = utils.last_non_blank_line(data['stdout']) + + if not initial_fragment: + # Something went wrong trying to expand the path remotely. 
Return + # the original string + return path + + if len(split_path) > 1: + return conn.shell.join_path(initial_fragment, *split_path[1:]) + else: + return initial_fragment + + # ***************************************************** + + def _remote_checksum(self, conn, tmp, path, inject): + ''' takes a remote checksum and returns 1 if no file ''' + + # Lookup the python interp from the host or delegate + + # host == inven_host when there is no delegate + host = inject['inventory_hostname'] + if 'delegate_to' in inject: + delegate = inject['delegate_to'] + if delegate: + # host == None when the delegate is not in inventory + host = None + # delegate set, check whether the delegate has inventory vars + delegate = template.template(self.basedir, delegate, inject) + if delegate in inject['hostvars']: + # host == delegate if we need to lookup the + # python_interpreter from the delegate's inventory vars + host = delegate + + if host: + python_interp = inject['hostvars'][host].get('ansible_python_interpreter', 'python') + else: + python_interp = 'python' + + cmd = conn.shell.checksum(path, python_interp) data = self._low_level_exec_command(conn, cmd, tmp, sudoable=True) data2 = utils.last_non_blank_line(data['stdout']) try: if data2 == '': # this may happen if the connection to the remote server - # failed, so just return "INVALIDMD5SUM" to avoid errors - return "INVALIDMD5SUM" + # failed, so just return "INVALIDCHECKSUM" to avoid errors + return "INVALIDCHECKSUM" else: return data2.split()[0] except IndexError: - sys.stderr.write("warning: md5sum command failed unusually, please report this to the list so it can be fixed\n") - sys.stderr.write("command: %s\n" % md5s) + sys.stderr.write("warning: Calculating checksum failed unusually, please report this to the list so it can be fixed\n") + sys.stderr.write("command: %s\n" % cmd) sys.stderr.write("----\n") sys.stderr.write("output: %s\n" % data) sys.stderr.write("----\n") # this will signal that it changed and allow things to keep going - return "INVALIDMD5SUM" + return "INVALIDCHECKSUM" # ***************************************************** @@ -1201,9 +1346,13 @@ class Runner(object): # Search module path(s) for named module. module_suffixes = getattr(conn, 'default_suffixes', None) - module_path = utils.plugins.module_finder.find_plugin(module_name, module_suffixes) + module_path = utils.plugins.module_finder.find_plugin(module_name, module_suffixes, transport=self.transport) if module_path is None: - raise errors.AnsibleFileNotFound("module %s not found in %s" % (module_name, utils.plugins.module_finder.print_paths())) + module_path2 = utils.plugins.module_finder.find_plugin('ping', module_suffixes) + if module_path2 is not None: + raise errors.AnsibleFileNotFound("module %s not found in configured module paths" % (module_name)) + else: + raise errors.AnsibleFileNotFound("module %s not found in configured module paths. Additionally, core modules are missing. If this is a checkout, run 'git submodule update --init --recursive' to correct this problem." % (module_name)) # insert shared code and arguments into the module @@ -1318,9 +1467,15 @@ class Runner(object): # Expose the current hostgroup to the bypassing plugins self.host_set = hosts # We aren't iterating over all the hosts in this - # group. So, just pick the first host in our group to + # group. So, just choose the "delegate_to" host if that is defined and is + # one of the targeted hosts, otherwise pick the first host in our group to # construct the conn object with. 
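The _remote_checksum helper above returns either a sha1 digest or a one-character status code, and the action-plugin hunks that follow branch on those codes. A short summary of the code meanings as this patch uses them; the helper below is illustrative and not part of the change:

```python
# Hypothetical helper, not part of the patch: summarizes the one-character
# status codes that _remote_checksum can return in place of a sha1 digest,
# as branched on by the fetch/copy/template/unarchive hunks below.
CHECKSUM_STATUS = {
    '0': "unable to calculate the checksum of the remote file",
    '1': "the remote file does not exist",
    '2': "no read permission on the remote file",
    '3': "the remote path is a directory",
    '4': "python is not present on the remote system",
}

def describe_remote_checksum(value):
    if value == 'INVALIDCHECKSUM':
        return "checksum command failed unexpectedly"
    # one-character values are status codes; anything longer is a digest
    return CHECKSUM_STATUS.get(value, "sha1 digest: %s" % value)

print(describe_remote_checksum('3'))  # -> the remote path is a directory
```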
- result_data = self._executor(hosts[0], None).result + if self.delegate_to is not None and self.delegate_to in hosts: + host = self.delegate_to + else: + host = hosts[0] + + result_data = self._executor(host, None).result # Create a ResultData item for each host in this group # using the returned result. If we didn't do this we would # get false reports of dark hosts. diff --git a/lib/ansible/runner/action_plugins/assemble.py b/lib/ansible/runner/action_plugins/assemble.py index fa4b6946294..287e9348655 100644 --- a/lib/ansible/runner/action_plugins/assemble.py +++ b/lib/ansible/runner/action_plugins/assemble.py @@ -108,10 +108,11 @@ class ActionModule(object): # Does all work assembling the file path = self._assemble_from_fragments(src, delimiter, _re) - pathmd5 = utils.md5s(path) - remote_md5 = self.runner._remote_md5(conn, tmp, dest) + path_checksum = utils.checksum_s(path) + dest = self.runner._remote_expand_user(conn, dest, tmp) + remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject) - if pathmd5 != remote_md5: + if path_checksum != remote_checksum: resultant = file(path).read() if self.runner.diff: dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True) @@ -124,7 +125,7 @@ class ActionModule(object): xfered = self.runner._transfer_str(conn, tmp, 'src', resultant) # fix file permissions when the copy is done as a different user - if self.runner.sudo and self.runner.sudo_user != 'root': + if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root': self.runner._remote_chmod(conn, 'a+r', xfered, tmp) # run the copy module @@ -147,6 +148,11 @@ class ActionModule(object): dest=dest, original_basename=os.path.basename(src), ) + + # make sure check mode is passed on correctly + if self.runner.noop_on_check(inject): + new_module_args['CHECKMODE'] = True + module_args_tmp = utils.merge_module_args(module_args, new_module_args) return self.runner._execute_module(conn, tmp, 'file', module_args_tmp, inject=inject) diff --git a/lib/ansible/runner/action_plugins/copy.py b/lib/ansible/runner/action_plugins/copy.py index 8dfb42ee2d7..9f6797a02aa 100644 --- a/lib/ansible/runner/action_plugins/copy.py +++ b/lib/ansible/runner/action_plugins/copy.py @@ -157,12 +157,15 @@ class ActionModule(object): if "-tmp-" not in tmp_path: tmp_path = self.runner._make_tmp_path(conn) - for source_full, source_rel in source_files: - # Generate the MD5 hash of the local file. - local_md5 = utils.md5(source_full) + # expand any user home dir specifier + dest = self.runner._remote_expand_user(conn, dest, tmp_path) - # If local_md5 is not defined we can't find the file so we should fail out. - if local_md5 is None: + for source_full, source_rel in source_files: + # Generate a hash of the local file. + local_checksum = utils.checksum(source_full) + + # If local_checksum is not defined we can't find the file so we should fail out. + if local_checksum is None: result = dict(failed=True, msg="could not find src=%s" % source_full) return ReturnData(conn=conn, result=result) @@ -174,27 +177,31 @@ class ActionModule(object): else: dest_file = conn.shell.join_path(dest) - # Attempt to get the remote MD5 Hash. - remote_md5 = self.runner._remote_md5(conn, tmp_path, dest_file) + # Attempt to get the remote checksum + remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject) - if remote_md5 == '3': - # The remote_md5 was executed on a directory.
+ if remote_checksum == '3': + # The remote_checksum was executed on a directory. if content is not None: # If source was defined as content remove the temporary file and fail out. self._remove_tempfile_if_content_defined(content, content_tempfile) result = dict(failed=True, msg="can not use content with a dir as dest") return ReturnData(conn=conn, result=result) else: - # Append the relative source location to the destination and retry remote_md5. + # Append the relative source location to the destination and retry remote_checksum dest_file = conn.shell.join_path(dest, source_rel) - remote_md5 = self.runner._remote_md5(conn, tmp_path, dest_file) + remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject) - if remote_md5 != '1' and not force: - # remote_file does not exist so continue to next iteration. + if remote_checksum == '4': + result = dict(msg="python isn't present on the system. Unable to compute checksum", failed=True) + return ReturnData(conn=conn, result=result) + + if remote_checksum != '1' and not force: + # remote_file exists so continue to next iteration. continue - if local_md5 != remote_md5: - # The MD5 hashes don't match and we will change or error out. + if local_checksum != remote_checksum: + # The checksums don't match and we will change or error out. changed = True # Create a tmp_path if missing only if this is not recursive. @@ -227,7 +234,7 @@ class ActionModule(object): self._remove_tempfile_if_content_defined(content, content_tempfile) # fix file permissions when the copy is done as a different user - if self.runner.sudo and self.runner.sudo_user != 'root' and not raw: + if (self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root') and not raw: self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path) if raw: @@ -254,7 +261,7 @@ class ActionModule(object): module_executed = True else: - # no need to transfer the file, already correct md5, but still need to call + # no need to transfer the file, already correct hash, but still need to call # the file module in case we want to change attributes self._remove_tempfile_if_content_defined(content, content_tempfile) @@ -283,8 +290,8 @@ class ActionModule(object): module_executed = True module_result = module_return.result - if not module_result.get('md5sum'): - module_result['md5sum'] = local_md5 + if not module_result.get('checksum'): + module_result['checksum'] = local_checksum if module_result.get('failed') == True: return module_return if module_result.get('changed') == True: diff --git a/lib/ansible/runner/action_plugins/debug.py b/lib/ansible/runner/action_plugins/debug.py index 98820956572..eaf1364c3f3 100644 --- a/lib/ansible/runner/action_plugins/debug.py +++ b/lib/ansible/runner/action_plugins/debug.py @@ -51,8 +51,8 @@ class ActionModule(object): else: result = dict(msg=args['msg']) elif 'var' in args and not utils.LOOKUP_REGEX.search(args['var']): - results = template.template(self.basedir, "{{ %s }}" % args['var'], inject) - result[args['var']] = results + results = template.template(self.basedir, args['var'], inject, convert_bare=True) + result['var'] = { args['var']: results } # force flag to make debug output module always verbose result['verbose_always'] = True diff --git a/lib/ansible/runner/action_plugins/fetch.py b/lib/ansible/runner/action_plugins/fetch.py index 00622f12824..3fa748ccbd1 100644 --- a/lib/ansible/runner/action_plugins/fetch.py +++ b/lib/ansible/runner/action_plugins/fetch.py @@ -50,19 +50,55 @@ class 
ActionModule(object): flat = utils.boolean(flat) fail_on_missing = options.get('fail_on_missing', False) fail_on_missing = utils.boolean(fail_on_missing) - validate_md5 = options.get('validate_md5', True) - validate_md5 = utils.boolean(validate_md5) + validate_checksum = options.get('validate_checksum', None) + if validate_checksum is not None: + validate_checksum = utils.boolean(validate_checksum) + # Alias for validate_checksum (old way of specifying it) + validate_md5 = options.get('validate_md5', None) + if validate_md5 is not None: + validate_md5 = utils.boolean(validate_md5) + if validate_md5 is None and validate_checksum is None: + # Default + validate_checksum = True + elif validate_checksum is None: + validate_checksum = validate_md5 + elif validate_md5 is not None and validate_checksum is not None: + results = dict(failed=True, msg="validate_checksum and validate_md5 cannot both be specified") + return ReturnData(conn, result=results) + if source is None or dest is None: results = dict(failed=True, msg="src and dest are required") return ReturnData(conn=conn, result=results) - source = os.path.expanduser(source) source = conn.shell.join_path(source) + source = self.runner._remote_expand_user(conn, source, tmp) + + # calculate checksum for the remote file + remote_checksum = self.runner._remote_checksum(conn, tmp, source, inject) + + # use slurp if sudo and permissions are lacking + remote_data = None + if remote_checksum in ('1', '2') or self.runner.sudo: + slurpres = self.runner._execute_module(conn, tmp, 'slurp', 'src=%s' % source, inject=inject) + if slurpres.is_successful(): + if slurpres.result['encoding'] == 'base64': + remote_data = base64.b64decode(slurpres.result['content']) + if remote_data is not None: + remote_checksum = utils.checksum_s(remote_data) + # the source path may have been expanded on the + # target system, so we compare it here and use the + # expanded version if it's different + remote_source = slurpres.result.get('source') + if remote_source and remote_source != source: + source = remote_source + + # calculate the destination name if os.path.sep not in conn.shell.join_path('a', ''): source_local = source.replace('\\', '/') else: source_local = source + dest = os.path.expanduser(dest) if flat: if dest.endswith("/"): # if the path ends with "/", we'll use the source filename as the @@ -76,40 +112,30 @@ class ActionModule(object): # files are saved in dest dir, with a subdir for each host, then the filename dest = "%s/%s/%s" % (utils.path_dwim(self.runner.basedir, dest), conn.host, source_local) - dest = os.path.expanduser(dest.replace("//","/")) + dest = dest.replace("//","/") - # calculate md5 sum for the remote file - remote_md5 = self.runner._remote_md5(conn, tmp, source) - - # use slurp if sudo and permissions are lacking - remote_data = None - if remote_md5 in ('1', '2') or self.runner.sudo: - slurpres = self.runner._execute_module(conn, tmp, 'slurp', 'src=%s' % source, inject=inject) - if slurpres.is_successful(): - if slurpres.result['encoding'] == 'base64': - remote_data = base64.b64decode(slurpres.result['content']) - if remote_data is not None: - remote_md5 = utils.md5s(remote_data) - - # these don't fail because you may want to transfer a log file that possibly MAY exist - # but keep going to fetch other log files - if remote_md5 == '0': - result = dict(msg="unable to calculate the md5 sum of the remote file", file=source, changed=False) - return ReturnData(conn=conn, result=result) - if remote_md5 == '1': - if fail_on_missing: - result = 
dict(failed=True, msg="the remote file does not exist", file=source) - else: - result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False) - return ReturnData(conn=conn, result=result) - if remote_md5 == '2': - result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False) + if remote_checksum in ('0', '1', '2', '3', '4'): + # these don't fail because you may want to transfer a log file that possibly MAY exist + # but keep going to fetch other log files + if remote_checksum == '0': + result = dict(msg="unable to calculate the checksum of the remote file", file=source, changed=False) + elif remote_checksum == '1': + if fail_on_missing: + result = dict(failed=True, msg="the remote file does not exist", file=source) + else: + result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False) + elif remote_checksum == '2': + result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False) + elif remote_checksum == '3': + result = dict(msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False) + elif remote_checksum == '4': + result = dict(msg="python isn't present on the system. Unable to compute checksum", file=source, changed=False) return ReturnData(conn=conn, result=result) - # calculate md5 sum for the local file - local_md5 = utils.md5(dest) + # calculate checksum for the local file + local_checksum = utils.checksum(dest) - if remote_md5 != local_md5: + if remote_checksum != local_checksum: # create the containing directories, if needed if not os.path.isdir(os.path.dirname(dest)): os.makedirs(os.path.dirname(dest)) @@ -121,13 +147,27 @@ class ActionModule(object): f = open(dest, 'w') f.write(remote_data) f.close() - new_md5 = utils.md5(dest) - if validate_md5 and new_md5 != remote_md5: - result = dict(failed=True, md5sum=new_md5, msg="md5 mismatch", file=source, dest=dest, remote_md5sum=remote_md5) + new_checksum = utils.secure_hash(dest) + # For backwards compatibility. We'll return None on FIPS enabled + # systems + try: + new_md5 = utils.md5(dest) + except ValueError: + new_md5 = None + + if validate_checksum and new_checksum != remote_checksum: + result = dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum) return ReturnData(conn=conn, result=result) - result = dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=remote_md5) + result = dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum) return ReturnData(conn=conn, result=result) else: - result = dict(changed=False, md5sum=local_md5, file=source, dest=dest) + # For backwards compatibility. 
We'll return None on FIPS enabled + # systems + try: + local_md5 = utils.md5(dest) + except ValueError: + local_md5 = None + + result = dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum) return ReturnData(conn=conn, result=result) diff --git a/lib/ansible/runner/action_plugins/patch.py b/lib/ansible/runner/action_plugins/patch.py new file mode 100644 index 00000000000..7c2947cafe9 --- /dev/null +++ b/lib/ansible/runner/action_plugins/patch.py @@ -0,0 +1,66 @@ +# (c) 2015, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License + +import os +from ansible import utils +from ansible.runner.return_data import ReturnData + +class ActionModule(object): + + def __init__(self, runner): + self.runner = runner + + def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs): + + options = {} + if complex_args: + options.update(complex_args) + options.update(utils.parse_kv(module_args)) + + src = options.get('src', None) + dest = options.get('dest', None) + remote_src = utils.boolean(options.get('remote_src', 'yes')) + + if src is None or dest is None: + result = dict(failed=True, msg="src and dest are required") + return ReturnData(conn=conn, comm_ok=False, result=result) + + if remote_src: + return self.runner._execute_module(conn, tmp, 'patch', module_args, inject=inject, complex_args=complex_args) + + # Source is local + if '_original_file' in inject: + src = utils.path_dwim_relative(inject['_original_file'], 'files', src, self.runner.basedir) + else: + src = utils.path_dwim(self.runner.basedir, src) + + tmp_src = tmp + src + conn.put_file(src, tmp_src) + + if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root': + if not self.runner.noop_on_check(inject): + self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp) + + new_module_args = dict( + src=tmp_src, + ) + + if self.runner.noop_on_check(inject): + new_module_args['CHECKMODE'] = True + + module_args = utils.merge_module_args(module_args, new_module_args) + + return self.runner._execute_module(conn, tmp, 'patch', module_args, inject=inject, complex_args=complex_args) diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index b16e5f66e6a..c74792d3363 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -33,9 +33,6 @@ class ActionModule(object): def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs): ''' handler for template operations ''' - # note: since this module just calls the copy module, the --check mode support - # can be implemented entirely over there - if not self.runner.is_playbook: raise errors.AnsibleError("in current versions of ansible, templates are only usable in playbooks") @@ -78,6 +75,8 @@ class ActionModule(object): else: source = utils.path_dwim(self.runner.basedir, source) + # Expand any user home dir specification + dest 
= self.runner._remote_expand_user(conn, dest, tmp) if dest.endswith("/"): # CCTODO: Fix path for Windows hosts. base = os.path.basename(source) @@ -90,10 +89,17 @@ class ActionModule(object): result = dict(failed=True, msg=type(e).__name__ + ": " + str(e)) return ReturnData(conn=conn, comm_ok=False, result=result) - local_md5 = utils.md5s(resultant) - remote_md5 = self.runner._remote_md5(conn, tmp, dest) + local_checksum = utils.checksum_s(resultant) + remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject) - if local_md5 != remote_md5: + if remote_checksum in ('0', '2', '3', '4'): + # Note: 1 means the file is not present which is fine; template + # will create it + result = dict(failed=True, msg="failed to checksum remote file." + " Checksum error code: %s" % remote_checksum) + return ReturnData(conn=conn, comm_ok=True, result=result) + + if local_checksum != remote_checksum: # template is different from the remote value @@ -113,7 +119,7 @@ class ActionModule(object): xfered = self.runner._transfer_str(conn, tmp, 'source', resultant) # fix file permissions when the copy is done as a different user - if self.runner.sudo and self.runner.sudo_user != 'root': + if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root': self.runner._remote_chmod(conn, 'a+r', xfered, tmp) # run the copy module @@ -121,6 +127,7 @@ class ActionModule(object): src=xfered, dest=dest, original_basename=os.path.basename(source), + follow=True, ) module_args_tmp = utils.merge_module_args(module_args, new_module_args) @@ -132,5 +139,22 @@ class ActionModule(object): res.diff = dict(before=dest_contents, after=resultant) return res else: - return self.runner._execute_module(conn, tmp, 'file', module_args, inject=inject, complex_args=complex_args) + # when running the file module based on the template data, we do + # not want the source filename (the name of the template) to be used, + # since this would mess up links, so we clear the src param and tell + # the module to follow links. When doing that, we have to set + # original_basename to the template just in case the dest is + # a directory. + module_args = '' + new_module_args = dict( + src=None, + original_basename=os.path.basename(source), + follow=True, + ) + # be sure to inject the check mode param into the module args and + # rely on the file module to report its changed status + if self.runner.noop_on_check(inject): + new_module_args['CHECKMODE'] = True + options.update(new_module_args) + return self.runner._execute_module(conn, tmp, 'file', module_args, inject=inject, complex_args=options) diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/lib/ansible/runner/action_plugins/unarchive.py index 1e3795955d2..572cd03d5e4 100644 --- a/lib/ansible/runner/action_plugins/unarchive.py +++ b/lib/ansible/runner/action_plugins/unarchive.py @@ -49,12 +49,33 @@ class ActionModule(object): source = options.get('src', None) dest = options.get('dest', None) copy = utils.boolean(options.get('copy', 'yes')) + creates = options.get('creates', None) if source is None or dest is None: result = dict(failed=True, msg="src (or content) and dest are required") return ReturnData(conn=conn, result=result) - dest = os.path.expanduser(dest) # CCTODO: Fix path for Windows hosts. + if creates: + # do not run the command if the line contains creates=filename + # and the filename already exists. This allows idempotence + # of command executions. 
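(The unarchive hunk resumes just below this note.) The comment above describes an idempotence guard borrowed from the command module's creates behavior; here is a local-filesystem sketch of the same check, with os.path.exists standing in for the remote stat module call the real code makes, and an arbitrary example path:

```python
# Local-filesystem sketch of the guard, assuming os.path.exists stands in
# for the remote 'stat' module call made by the actual hunk.
import os

def skip_for_creates(creates):
    # if the path named by 'creates' already exists, report the task as
    # skipped instead of unpacking the archive again
    if creates and os.path.exists(creates):
        return dict(skipped=True, changed=False,
                    msg="skipped, since %s exists" % creates)
    return None

result = skip_for_creates('/etc/hostname')
print(result or "would run the unarchive module")
```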
+ module_args_tmp = "" + complex_args_tmp = dict(path=creates, get_md5=False, get_checksum=False) + module_return = self.runner._execute_module(conn, tmp, 'stat', module_args_tmp, inject=inject, + complex_args=complex_args_tmp, persist_files=True) + stat = module_return.result.get('stat', None) + if stat and stat.get('exists', False): + return ReturnData( + conn=conn, + comm_ok=True, + result=dict( + skipped=True, + changed=False, + msg=("skipped, since %s exists" % creates) + ) + ) + + dest = self.runner._remote_expand_user(conn, dest, tmp) # CCTODO: Fix path for Windows hosts. source = template.template(self.runner.basedir, os.path.expanduser(source), inject) if copy: if '_original_file' in inject: @@ -62,8 +83,11 @@ else: source = utils.path_dwim(self.runner.basedir, source) - remote_md5 = self.runner._remote_md5(conn, tmp, dest) - if remote_md5 != '3': + remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject) + if remote_checksum == '4': + result = dict(failed=True, msg="python isn't present on the system. Unable to compute checksum") + return ReturnData(conn=conn, result=result) + if remote_checksum != '3': result = dict(failed=True, msg="dest '%s' must be an existing dir" % dest) return ReturnData(conn=conn, result=result) @@ -76,14 +100,23 @@ # handle check mode client side # fix file permissions when the copy is done as a different user if copy: - if self.runner.sudo and self.runner.sudo_user != 'root': - self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp) + if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root': + if not self.runner.noop_on_check(inject): + self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp) # Build temporary module_args. new_module_args = dict( src=tmp_src, original_basename=os.path.basename(source), ) + + # make sure check mode is passed on correctly + if self.runner.noop_on_check(inject): + new_module_args['CHECKMODE'] = True + module_args = utils.merge_module_args(module_args, new_module_args) else: module_args = "%s original_basename=%s" % (module_args, pipes.quote(os.path.basename(source))) + # make sure check mode is passed on correctly + if self.runner.noop_on_check(inject): + module_args += " CHECKMODE=True" return self.runner._execute_module(conn, tmp, 'unarchive', module_args, inject=inject, complex_args=complex_args) diff --git a/lib/ansible/runner/action_plugins/win_copy.py b/lib/ansible/runner/action_plugins/win_copy.py new file mode 100644 index 00000000000..28362195c96 --- /dev/null +++ b/lib/ansible/runner/action_plugins/win_copy.py @@ -0,0 +1,377 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
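Before the body of the new win_copy plugin, note the check-mode pattern that the assemble, template, unarchive and patch hunks above all repeat: when noop_on_check(inject) is true, a CHECKMODE flag is merged into the module arguments so the remote module only reports what would change. A rough sketch under stated assumptions (merge_module_args here is a simplified stand-in for the runner utility, and the argument values are made up):

```python
# Rough sketch of the pattern; merge_module_args is a simplified stand-in
# for the runner utility, and the argument values here are made up.
def merge_module_args(args, new_args):
    # k=v string form, as the unarchive plugin's non-copy branch uses
    extra = " ".join("%s=%s" % (k, v) for k, v in sorted(new_args.items()))
    return ("%s %s" % (args, extra)).strip()

def build_args(args, check_mode):
    new_args = dict(src='/tmp/xfered-src')  # made-up transferred file path
    if check_mode:
        # tell the remote module to report changes without making them
        new_args['CHECKMODE'] = True
    return merge_module_args(args, new_args)

print(build_args("dest=/etc/motd", check_mode=True))
# -> dest=/etc/motd CHECKMODE=True src=/tmp/xfered-src
```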
+ +import os + +from ansible import utils +import ansible.constants as C +import ansible.utils.template as template +from ansible import errors +from ansible.runner.return_data import ReturnData +import base64 +import json +import stat +import tempfile +import pipes + +## fixes https://github.com/ansible/ansible/issues/3518 +# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html +import sys +reload(sys) +sys.setdefaultencoding("utf8") + + +class ActionModule(object): + + def __init__(self, runner): + self.runner = runner + + def run(self, conn, tmp_path, module_name, module_args, inject, complex_args=None, **kwargs): + ''' handler for file transfer operations ''' + + # load up options + options = {} + if complex_args: + options.update(complex_args) + options.update(utils.parse_kv(module_args)) + source = options.get('src', None) + content = options.get('content', None) + dest = options.get('dest', None) + raw = utils.boolean(options.get('raw', 'no')) + force = utils.boolean(options.get('force', 'yes')) + + # content with newlines is going to be escaped to safely load in yaml + # now we need to unescape it so that the newlines are evaluated properly + # when writing the file to disk + if content: + if isinstance(content, unicode): + try: + content = content.decode('unicode-escape') + except UnicodeDecodeError: + pass + + if (source is None and content is None and not 'first_available_file' in inject) or dest is None: + result=dict(failed=True, msg="src (or content) and dest are required") + return ReturnData(conn=conn, result=result) + elif (source is not None or 'first_available_file' in inject) and content is not None: + result=dict(failed=True, msg="src and content are mutually exclusive") + return ReturnData(conn=conn, result=result) + + # Check if the source ends with a "/" + source_trailing_slash = False + if source: + source_trailing_slash = source.endswith("/") + + # Define content_tempfile in case we set it after finding content populated. + content_tempfile = None + + # If content is defined make a temp file and write the content into it. + if content is not None: + try: + # If content comes to us as a dict it should be decoded json. + # We need to encode it back into a string to write it out. 
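+                # e.g. a task such as  win_copy: content="{{ mydict }}" dest=...
+                # can arrive here with content as a dict; json.dumps() below
+                # turns it back into the JSON text that is written to the temp
+                # file (illustrative task; parameter values invented).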
+                if type(content) is dict:
+                    content_tempfile = self._create_content_tempfile(json.dumps(content))
+                else:
+                    content_tempfile = self._create_content_tempfile(content)
+                source = content_tempfile
+            except Exception, err:
+                result = dict(failed=True, msg="could not write content temp file: %s" % err)
+                return ReturnData(conn=conn, result=result)
+        # if we have first_available_file in our vars
+        # look up the files and use the first one we find as src
+        elif 'first_available_file' in inject:
+            found = False
+            for fn in inject.get('first_available_file'):
+                fn_orig = fn
+                fnt = template.template(self.runner.basedir, fn, inject)
+                fnd = utils.path_dwim(self.runner.basedir, fnt)
+                if not os.path.exists(fnd) and '_original_file' in inject:
+                    fnd = utils.path_dwim_relative(inject['_original_file'], 'files', fnt, self.runner.basedir, check=False)
+                if os.path.exists(fnd):
+                    source = fnd
+                    found = True
+                    break
+            if not found:
+                results = dict(failed=True, msg="could not find src in first_available_file list")
+                return ReturnData(conn=conn, result=results)
+        else:
+            source = template.template(self.runner.basedir, source, inject)
+            if '_original_file' in inject:
+                source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
+            else:
+                source = utils.path_dwim(self.runner.basedir, source)
+
+        # A list of source file tuples (full_path, relative_path) which we will try to copy to the destination
+        source_files = []
+
+        # If source is a directory, populate our list; else source is a file and translate it to a tuple.
+        if os.path.isdir(source):
+            # Get the number of characters to remove to get the relative path.
+            if source_trailing_slash:
+                sz = len(source) + 1
+            else:
+                sz = len(source.rsplit('/', 1)[0]) + 1
+
+            # Walk the directory and append the file tuples to source_files.
+            for base_path, sub_folders, files in os.walk(source):
+                for file in files:
+                    full_path = os.path.join(base_path, file)
+                    rel_path = full_path[sz:]
+                    source_files.append((full_path, rel_path))
+
+            # If it's recursive copy, destination is always a dir,
+            # explicitly mark it so (note - copy module relies on this).
+            if not conn.shell.path_has_trailing_slash(dest):
+                dest = conn.shell.join_path(dest, '')
+        else:
+            source_files.append((source, os.path.basename(source)))
+
+        changed = False
+        diffs = []
+        module_result = {"changed": False}
+
+        # A register for if we executed a module.
+        # Used to cut down on command calls when not recursive.
+        module_executed = False
+
+        # Tell _execute_module to delete the file if there is one file.
+        delete_remote_tmp = (len(source_files) == 1)
+
+        # If this is a recursive action create a tmp_path that we can share as the _exec_module create is too late.
+        if not delete_remote_tmp:
+            if "-tmp-" not in tmp_path:
+                tmp_path = self.runner._make_tmp_path(conn)
+
+        # expand any user home dir specifier
+        dest = self.runner._remote_expand_user(conn, dest, tmp_path)
+
+        for source_full, source_rel in source_files:
+            # Generate a hash of the local file.
+            local_checksum = utils.checksum(source_full)
+
+            # If local_checksum is not defined we can't find the file so we should fail out.
+            if local_checksum is None:
+                result = dict(failed=True, msg="could not find src=%s" % source_full)
+                return ReturnData(conn=conn, result=result)
+
+            # This is a kind of optimization - if the user told us the destination
+            # is a dir, do path manipulation right away, otherwise we still check
+            # for dest being a dir via remote call below.
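+            # e.g. dest='C:\\temp\\' (trailing slash) is treated as a directory
+            # immediately, while dest='C:\\temp\\app.conf' still gets checked
+            # with a remote call below (illustrative paths).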
+            if conn.shell.path_has_trailing_slash(dest):
+                dest_file = conn.shell.join_path(dest, source_rel)
+            else:
+                dest_file = conn.shell.join_path(dest)
+
+            # Attempt to get the remote checksum
+            remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)

+            if remote_checksum == '3':
+                # The remote_checksum was executed on a directory.
+                if content is not None:
+                    # If source was defined as content remove the temporary file and fail out.
+                    self._remove_tempfile_if_content_defined(content, content_tempfile)
+                    result = dict(failed=True, msg="can not use content with a dir as dest")
+                    return ReturnData(conn=conn, result=result)
+                else:
+                    # Append the relative source location to the destination and retry remote_checksum.
+                    dest_file = conn.shell.join_path(dest, source_rel)
+                    remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)
+
+            if remote_checksum != '1' and not force:
+                # The remote file already exists and force=no, so leave it alone
+                # and continue to the next iteration.
+                continue
+
+            if local_checksum != remote_checksum:
+                # The checksums don't match and we will change or error out.
+                changed = True
+
+                # Create a tmp_path if missing only if this is not recursive.
+                # If this is recursive we already have a tmp_path.
+                if delete_remote_tmp:
+                    if "-tmp-" not in tmp_path:
+                        tmp_path = self.runner._make_tmp_path(conn)
+
+                if self.runner.diff and not raw:
+                    diff = self._get_diff_data(conn, tmp_path, inject, dest_file, source_full)
+                else:
+                    diff = {}
+
+                if self.runner.noop_on_check(inject):
+                    self._remove_tempfile_if_content_defined(content, content_tempfile)
+                    diffs.append(diff)
+                    changed = True
+                    module_result = dict(changed=True)
+                    continue
+
+                # Define a remote file path that we will copy the file to.
+                tmp_src = tmp_path + 'source'
+
+                if not raw:
+                    conn.put_file(source_full, tmp_src)
+                else:
+                    conn.put_file(source_full, dest_file)
+
+                # We have copied the file remotely and no longer require our content_tempfile
+                self._remove_tempfile_if_content_defined(content, content_tempfile)
+
+                # fix file permissions when the copy is done as a different user
+                if (self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root') and not raw:
+                    self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path)
+
+                if raw:
+                    # Continue to next iteration if raw is defined.
+                    continue
+
+                # Run the copy module
+
+                # src and dest here come after original and override them
+                # we pass dest only to make sure it includes trailing slash in case of recursive copy
+                new_module_args = dict(
+                    src=tmp_src,
+                    dest=dest,
+                    original_basename=source_rel
+                )
+                if self.runner.noop_on_check(inject):
+                    new_module_args['CHECKMODE'] = True
+                if self.runner.no_log:
+                    new_module_args['NO_LOG'] = True
+
+                module_args_tmp = utils.merge_module_args(module_args, new_module_args)
+
+                module_return = self.runner._execute_module(conn, tmp_path, 'win_copy', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
+                module_executed = True
+
+            else:
+                # no need to transfer the file, it already has the correct checksum,
+                # but still need to call the file module in case we want to change attributes
+                self._remove_tempfile_if_content_defined(content, content_tempfile)
+
+                if raw:
+                    # Continue to next iteration if raw is defined.
+                    # self.runner._remove_tmp_path(conn, tmp_path)
+                    continue
+
+                tmp_src = tmp_path + source_rel
+
+                # Build temporary module_args.
+ new_module_args = dict( + src=tmp_src, + dest=dest, + original_basename=source_rel + ) + if self.runner.noop_on_check(inject): + new_module_args['CHECKMODE'] = True + if self.runner.no_log: + new_module_args['NO_LOG'] = True + + module_args_tmp = utils.merge_module_args(module_args, new_module_args) + + # Execute the file module. + module_return = self.runner._execute_module(conn, tmp_path, 'win_file', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp) + module_executed = True + + module_result = module_return.result + if not module_result.get('checksum'): + module_result['checksum'] = local_checksum + if module_result.get('failed') == True: + return module_return + if module_result.get('changed') == True: + changed = True + + # Delete tmp_path if we were recursive or if we did not execute a module. + if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) \ + or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed): + self.runner._remove_tmp_path(conn, tmp_path) + + # the file module returns the file path as 'path', but + # the copy module uses 'dest', so add it if it's not there + if 'path' in module_result and 'dest' not in module_result: + module_result['dest'] = module_result['path'] + + # TODO: Support detailed status/diff for multiple files + if len(source_files) == 1: + result = module_result + else: + result = dict(dest=dest, src=source, changed=changed) + if len(diffs) == 1: + return ReturnData(conn=conn, result=result, diff=diffs[0]) + else: + return ReturnData(conn=conn, result=result) + + def _create_content_tempfile(self, content): + ''' Create a tempfile containing defined content ''' + fd, content_tempfile = tempfile.mkstemp() + f = os.fdopen(fd, 'w') + try: + f.write(content) + except Exception, err: + os.remove(content_tempfile) + raise Exception(err) + finally: + f.close() + return content_tempfile + + def _get_diff_data(self, conn, tmp, inject, destination, source): + peek_result = self.runner._execute_module(conn, tmp, 'win_file', "path=%s diff_peek=1" % destination, inject=inject, persist_files=True) + + if not peek_result.is_successful(): + return {} + + diff = {} + if peek_result.result['state'] == 'absent': + diff['before'] = '' + elif peek_result.result['appears_binary']: + diff['dst_binary'] = 1 + elif peek_result.result['size'] > utils.MAX_FILE_SIZE_FOR_DIFF: + diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF + else: + dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % destination, inject=inject, persist_files=True) + if 'content' in dest_result.result: + dest_contents = dest_result.result['content'] + if dest_result.result['encoding'] == 'base64': + dest_contents = base64.b64decode(dest_contents) + else: + raise Exception("unknown encoding, failed: %s" % dest_result.result) + diff['before_header'] = destination + diff['before'] = dest_contents + + src = open(source) + src_contents = src.read(8192) + st = os.stat(source) + if "\x00" in src_contents: + diff['src_binary'] = 1 + elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF: + diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF + else: + src.seek(0) + diff['after_header'] = source + diff['after'] = src.read() + + return diff + + def _remove_tempfile_if_content_defined(self, content, content_tempfile): + if content is not None: + os.remove(content_tempfile) + + + def _result_key_merge(self, options, results): + # add keys to file module results to mimic copy + if 'path' in results.result and 'dest' not in 
results.result: + results.result['dest'] = results.result['path'] + del results.result['path'] + return results diff --git a/lib/ansible/runner/action_plugins/win_template.py b/lib/ansible/runner/action_plugins/win_template.py new file mode 100644 index 00000000000..e32a5806c4b --- /dev/null +++ b/lib/ansible/runner/action_plugins/win_template.py @@ -0,0 +1,146 @@ +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import os +import pipes +from ansible.utils import template +from ansible import utils +from ansible import errors +from ansible.runner.return_data import ReturnData +import base64 + +class ActionModule(object): + + TRANSFERS_FILES = True + + def __init__(self, runner): + self.runner = runner + + def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs): + ''' handler for template operations ''' + + if not self.runner.is_playbook: + raise errors.AnsibleError("in current versions of ansible, templates are only usable in playbooks") + + # load up options + options = {} + if complex_args: + options.update(complex_args) + options.update(utils.parse_kv(module_args)) + + source = options.get('src', None) + dest = options.get('dest', None) + + if (source is None and 'first_available_file' not in inject) or dest is None: + result = dict(failed=True, msg="src and dest are required") + return ReturnData(conn=conn, comm_ok=False, result=result) + + # if we have first_available_file in our vars + # look up the files and use the first one we find as src + + if 'first_available_file' in inject: + found = False + for fn in self.runner.module_vars.get('first_available_file'): + fn_orig = fn + fnt = template.template(self.runner.basedir, fn, inject) + fnd = utils.path_dwim(self.runner.basedir, fnt) + if not os.path.exists(fnd) and '_original_file' in inject: + fnd = utils.path_dwim_relative(inject['_original_file'], 'templates', fnt, self.runner.basedir, check=False) + if os.path.exists(fnd): + source = fnd + found = True + break + if not found: + result = dict(failed=True, msg="could not find src in first_available_file list") + return ReturnData(conn=conn, comm_ok=False, result=result) + else: + source = template.template(self.runner.basedir, source, inject) + + if '_original_file' in inject: + source = utils.path_dwim_relative(inject['_original_file'], 'templates', source, self.runner.basedir) + else: + source = utils.path_dwim(self.runner.basedir, source) + + if conn.shell.path_has_trailing_slash(dest): + base = os.path.basename(source) + dest = conn.shell.join_path(dest, base) + + # template the source data locally & get ready to transfer + try: + resultant = template.template_from_file(self.runner.basedir, source, inject, vault_password=self.runner.vault_pass) + except Exception, e: + result = dict(failed=True, msg=type(e).__name__ + ": " + str(e)) + return ReturnData(conn=conn, comm_ok=False, result=result) + + local_checksum = utils.checksum_s(resultant) 
+ remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject) + + if local_checksum != remote_checksum: + + # template is different from the remote value + + # if showing diffs, we need to get the remote value + dest_contents = '' + + if self.runner.diff: + # using persist_files to keep the temp directory around to avoid needing to grab another + dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True) + if 'content' in dest_result.result: + dest_contents = dest_result.result['content'] + if dest_result.result['encoding'] == 'base64': + dest_contents = base64.b64decode(dest_contents) + else: + raise Exception("unknown encoding, failed: %s" % dest_result.result) + + xfered = self.runner._transfer_str(conn, tmp, 'source', resultant) + + # fix file permissions when the copy is done as a different user + if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root': + self.runner._remote_chmod(conn, 'a+r', xfered, tmp) + + # run the copy module + new_module_args = dict( + src=xfered, + dest=dest, + original_basename=os.path.basename(source), + follow=True, + ) + module_args_tmp = utils.merge_module_args(module_args, new_module_args) + + if self.runner.noop_on_check(inject): + return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=source, before=dest_contents, after=resultant)) + else: + res = self.runner._execute_module(conn, tmp, 'win_copy', module_args_tmp, inject=inject, complex_args=complex_args) + if res.result.get('changed', False): + res.diff = dict(before=dest_contents, after=resultant) + return res + else: + # when running the file module based on the template data, we do + # not want the source filename (the name of the template) to be used, + # since this would mess up links, so we clear the src param and tell + # the module to follow links + new_module_args = dict( + src=None, + follow=True, + ) + # be sure to inject the check mode param into the module args and + # rely on the file module to report its changed status + if self.runner.noop_on_check(inject): + new_module_args['CHECKMODE'] = True + module_args = utils.merge_module_args(module_args, new_module_args) + return self.runner._execute_module(conn, tmp, 'win_file', module_args, inject=inject, complex_args=complex_args) + diff --git a/lib/ansible/runner/connection.py b/lib/ansible/runner/connection.py index 429b3f190de..bb50bf5531f 100644 --- a/lib/ansible/runner/connection.py +++ b/lib/ansible/runner/connection.py @@ -20,6 +20,7 @@ import os import stat +import errno from ansible import utils from ansible.errors import AnsibleError @@ -36,8 +37,14 @@ class Connector(object): raise AnsibleError("unsupported connection type: %s" % transport) if private_key_file: # If private key is readable by user other than owner, flag an error - st = os.stat(private_key_file) - if st.st_mode & (stat.S_IRGRP | stat.S_IROTH): + st = None + try: + st = os.stat(private_key_file) + except (IOError, OSError), e: + if e.errno != errno.ENOENT: # file is missing, might be agent + raise(e) + + if st is not None and st.st_mode & (stat.S_IRGRP | stat.S_IROTH): raise AnsibleError("private_key_file (%s) is group-readable or world-readable and thus insecure - " "you will probably get an SSH failure" % (private_key_file,)) diff --git a/lib/ansible/runner/connection_plugins/accelerate.py b/lib/ansible/runner/connection_plugins/accelerate.py index 8277d805de2..a31124e119f 100644 --- 
a/lib/ansible/runner/connection_plugins/accelerate.py +++ b/lib/ansible/runner/connection_plugins/accelerate.py @@ -239,7 +239,7 @@ class Connection(object): executable = constants.DEFAULT_EXECUTABLE if self.runner.sudo and sudoable and sudo_user: - cmd, prompt, success_key = utils.make_sudo_cmd(sudo_user, executable, cmd) + cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) vvv("EXEC COMMAND %s" % cmd) diff --git a/lib/ansible/runner/connection_plugins/local.py b/lib/ansible/runner/connection_plugins/local.py index ec57afcecf8..e282076ee1e 100644 --- a/lib/ansible/runner/connection_plugins/local.py +++ b/lib/ansible/runner/connection_plugins/local.py @@ -57,7 +57,7 @@ class Connection(object): else: local_cmd = cmd else: - local_cmd, prompt, success_key = utils.make_sudo_cmd(sudo_user, executable, cmd) + local_cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) executable = executable.split()[0] if executable else None vvv("EXEC %s" % (local_cmd), host=self.host) diff --git a/lib/ansible/runner/connection_plugins/paramiko_ssh.py b/lib/ansible/runner/connection_plugins/paramiko_ssh.py index 5e3cfc55a96..4bb06e01c36 100644 --- a/lib/ansible/runner/connection_plugins/paramiko_ssh.py +++ b/lib/ansible/runner/connection_plugins/paramiko_ssh.py @@ -40,10 +40,10 @@ from ansible.callbacks import vvv from ansible import errors from ansible import utils from ansible import constants as C - + AUTHENTICITY_MSG=""" -paramiko: The authenticity of host '%s' can't be established. -The %s key fingerprint is %s. +paramiko: The authenticity of host '%s' can't be established. +The %s key fingerprint is %s. Are you sure you want to continue connecting (yes/no)? """ @@ -67,7 +67,7 @@ class MyAddPolicy(object): local L{HostKeys} object, and saving it. This is used by L{SSHClient}. """ - def __init__(self, runner): + def __init__(self, runner): self.runner = runner def missing_host_key(self, client, hostname, key): @@ -81,7 +81,7 @@ class MyAddPolicy(object): sys.stdin = self.runner._new_stdin fingerprint = hexlify(key.get_fingerprint()) ktype = key.get_name() - + # clear out any premature input on sys.stdin tcflush(sys.stdin, TCIFLUSH) @@ -103,7 +103,7 @@ class MyAddPolicy(object): # host keys are actually saved in close() function below # in order to control ordering. 
- + # keep connection objects on a per host basis to avoid repeated attempts to reconnect @@ -145,7 +145,7 @@ class Connection(object): vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self.user, self.port, self.host), host=self.host) ssh = paramiko.SSHClient() - + self.keyfile = os.path.expanduser("~/.ssh/known_hosts") if C.HOST_KEY_CHECKING: @@ -194,8 +194,8 @@ class Connection(object): try: - chan = self.ssh.get_transport().open_session() self.ssh.get_transport().set_keepalive(5) + chan = self.ssh.get_transport().open_session() except Exception, e: @@ -204,6 +204,8 @@ class Connection(object): msg += ": %s" % str(e) raise errors.AnsibleConnectionFailed(msg) + no_prompt_out = '' + no_prompt_err = '' if not (self.runner.sudo and sudoable) and not (self.runner.su and su): if executable: @@ -223,7 +225,7 @@ class Connection(object): width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0))) if self.runner.sudo or sudoable: - shcmd, prompt, success_key = utils.make_sudo_cmd(sudo_user, executable, cmd) + shcmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) elif self.runner.su or su: shcmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd) @@ -259,6 +261,9 @@ class Connection(object): chan.sendall(self.runner.sudo_pass + '\n') elif su: chan.sendall(self.runner.su_pass + '\n') + else: + no_prompt_out += sudo_output + no_prompt_err += sudo_output except socket.timeout: @@ -267,7 +272,7 @@ class Connection(object): stdout = ''.join(chan.makefile('rb', bufsize)) stderr = ''.join(chan.makefile_stderr('rb', bufsize)) - return (chan.recv_exit_status(), '', stdout, stderr) + return (chan.recv_exit_status(), '', no_prompt_out + stdout, no_prompt_out + stderr) def put_file(self, in_path, out_path): ''' transfer a file from local to remote ''' @@ -313,7 +318,7 @@ class Connection(object): def _any_keys_added(self): - added_any = False + added_any = False for hostname, keys in self.ssh._host_keys.iteritems(): for keytype, key in keys.iteritems(): added_this_time = getattr(key, '_added_by_ansible_this_time', False) @@ -322,9 +327,9 @@ class Connection(object): return False def _save_ssh_host_keys(self, filename): - ''' - not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks - don't complain about it :) + ''' + not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks + don't complain about it :) ''' if not self._any_keys_added(): @@ -367,7 +372,7 @@ class Connection(object): if C.HOST_KEY_CHECKING and C.PARAMIKO_RECORD_HOST_KEYS and self._any_keys_added(): # add any new SSH host keys -- warning -- this could be slow - lockfile = self.keyfile.replace("known_hosts",".known_hosts.lock") + lockfile = self.keyfile.replace("known_hosts",".known_hosts.lock") dirname = os.path.dirname(self.keyfile) if not os.path.exists(dirname): os.makedirs(dirname) @@ -409,4 +414,4 @@ class Connection(object): fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN) self.ssh.close() - + diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index fdf5f0dc6e5..0ab4d0b859e 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -65,7 +65,7 @@ class Connection(object): else: self.common_args += ["-o", "ControlMaster=auto", "-o", "ControlPersist=60s", - "-o", "ControlPath=%s" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))] + "-o", "ControlPath=\"%s\"" % 
(C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))] cp_in_use = False cp_path_set = False @@ -76,7 +76,7 @@ class Connection(object): cp_path_set = True if cp_in_use and not cp_path_set: - self.common_args += ["-o", "ControlPath=%s" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))] + self.common_args += ["-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))] if not C.HOST_KEY_CHECKING: self.common_args += ["-o", "StrictHostKeyChecking=no"] @@ -230,6 +230,7 @@ class Connection(object): host_fh.close() for line in data.split("\n"): + line = line.strip() if line is None or " " not in line: continue tokens = line.split() @@ -283,7 +284,7 @@ class Connection(object): else: ssh_cmd.append(cmd) else: - sudocmd, prompt, success_key = utils.make_sudo_cmd(sudo_user, executable, cmd) + sudocmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) ssh_cmd.append(sudocmd) vvv("EXEC %s" % ' '.join(ssh_cmd), host=self.host) @@ -301,6 +302,8 @@ class Connection(object): self._send_password() + no_prompt_out = '' + no_prompt_err = '' if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \ (self.runner.su and su and self.runner.su_pass): # several cases are handled for sudo privileges with password @@ -332,7 +335,7 @@ class Connection(object): "sudo", "Sorry, try again.") if sudo_errput.strip().endswith("%s%s" % (prompt, incorrect_password)): raise errors.AnsibleError('Incorrect sudo password') - elif sudo_errput.endswith(prompt): + elif prompt and sudo_errput.endswith(prompt): stdin.write(self.runner.sudo_pass + '\n') if p.stdout in rfd: @@ -351,6 +354,9 @@ class Connection(object): stdin.write(self.runner.sudo_pass + '\n') elif su: stdin.write(self.runner.su_pass + '\n') + else: + no_prompt_out += sudo_output + no_prompt_err += sudo_errput (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt) @@ -371,7 +377,7 @@ class Connection(object): if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'): raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. 
Make sure this host can be reached over ssh') - return (p.returncode, '', stdout, stderr) + return (p.returncode, '', no_prompt_out + stdout, no_prompt_err + stderr) def put_file(self, in_path, out_path): ''' transfer a file from local to remote ''' diff --git a/lib/ansible/runner/connection_plugins/winrm.py b/lib/ansible/runner/connection_plugins/winrm.py index d6e51710b5f..79e7ed58890 100644 --- a/lib/ansible/runner/connection_plugins/winrm.py +++ b/lib/ansible/runner/connection_plugins/winrm.py @@ -37,6 +37,13 @@ try: except ImportError: raise errors.AnsibleError("winrm is not installed") +HAVE_KERBEROS = False +try: + import kerberos + HAVE_KERBEROS = True +except ImportError: + pass + _winrm_cache = { # 'user:pwhash@host:port': } @@ -47,6 +54,11 @@ def vvvvv(msg, host=None): class Connection(object): '''WinRM connections over HTTP/HTTPS.''' + transport_schemes = { + 'http': [('kerberos', 'http'), ('plaintext', 'http'), ('plaintext', 'https')], + 'https': [('kerberos', 'https'), ('plaintext', 'http'), ('plaintext', 'https')], + } + def __init__(self, runner, host, port, user, password, *args, **kwargs): self.runner = runner self.host = host @@ -72,11 +84,10 @@ class Connection(object): if cache_key in _winrm_cache: vvvv('WINRM REUSE EXISTING CONNECTION: %s' % cache_key, host=self.host) return _winrm_cache[cache_key] - transport_schemes = [('plaintext', 'https'), ('plaintext', 'http')] # FIXME: ssl/kerberos - if port == 5985: - transport_schemes = reversed(transport_schemes) exc = None - for transport, scheme in transport_schemes: + for transport, scheme in self.transport_schemes['http' if port == 5985 else 'https']: + if transport == 'kerberos' and not HAVE_KERBEROS: + continue endpoint = urlparse.urlunsplit((scheme, netloc, '/wsman', '', '')) vvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self.host) @@ -143,7 +154,7 @@ class Connection(object): vvv("EXEC %s" % cmd, host=self.host) # For script/raw support. if cmd_parts and cmd_parts[0].lower().endswith('.ps1'): - script = powershell._build_file_cmd(cmd_parts) + script = powershell._build_file_cmd(cmd_parts, quote_args=False) cmd_parts = powershell._encode_script(script, as_list=True) try: result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True) @@ -193,7 +204,7 @@ class Connection(object): def fetch_file(self, in_path, out_path): out_path = out_path.replace('\\', '/') vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) - buffer_size = 2**20 # 1MB chunks + buffer_size = 2**19 # 0.5MB chunks if not os.path.exists(os.path.dirname(out_path)): os.makedirs(os.path.dirname(out_path)) out_file = None diff --git a/lib/ansible/runner/filter_plugins/core.py b/lib/ansible/runner/filter_plugins/core.py index 61b80bce2c5..22ce73970cd 100644 --- a/lib/ansible/runner/filter_plugins/core.py +++ b/lib/ansible/runner/filter_plugins/core.py @@ -15,21 +15,33 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+import sys
 import base64
 import json
 import os.path
-import yaml
 import types
 import pipes
 import glob
 import re
 import collections
+import crypt
+import hashlib
+import string
+from functools import partial
 import operator as py_operator
-from ansible import errors
-from ansible.utils import md5s
-from distutils.version import LooseVersion, StrictVersion
-from random import SystemRandom
+from random import SystemRandom, shuffle
+import uuid
+
+import yaml
 from jinja2.filters import environmentfilter
+from distutils.version import LooseVersion, StrictVersion
+
+from ansible import errors
+from ansible.utils.hashing import md5s, checksum_s
+from ansible.utils.unicode import unicode_wrap
+
+
+UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')

 def to_nice_yaml(*a, **kw):
@@ -42,6 +54,22 @@ def to_json(a, *args, **kw):

 def to_nice_json(a, *args, **kw):
     '''Make verbose, human readable JSON'''
+    # python-2.6's json encoder is buggy (can't encode hostvars)
+    if sys.version_info < (2, 7):
+        try:
+            import simplejson
+        except ImportError:
+            pass
+        else:
+            try:
+                major = int(simplejson.__version__.split('.')[0])
+            except:
+                pass
+            else:
+                if major >= 2:
+                    return simplejson.dumps(a, indent=4, sort_keys=True, *args, **kw)
+        # Fallback to the to_json filter
+        return to_json(a, *args, **kw)
     return json.dumps(a, indent=4, sort_keys=True, *args, **kw)

 def failed(*a, **kw):
@@ -235,6 +263,48 @@ def rand(environment, end, start=None, step=None):
     else:
         raise errors.AnsibleFilterError('random can only be used on sequences and integers')

+def randomize_list(mylist):
+    try:
+        mylist = list(mylist)
+        shuffle(mylist)
+    except:
+        pass
+    return mylist
+
+def get_hash(data, hashtype='sha1'):
+
+    try: # see if hash is supported
+        h = hashlib.new(hashtype)
+    except:
+        return None
+
+    h.update(data)
+    return h.hexdigest()
+
+def get_encrypted_password(password, hashtype='sha512', salt=None):
+
+    # TODO: find a way to construct dynamically from system
+    cryptmethod = {
+        'md5': '1',
+        'blowfish': '2a',
+        'sha256': '5',
+        'sha512': '6',
+    }
+
+    hashtype = hashtype.lower()
+    if hashtype in cryptmethod:
+        if salt is None:
+            r = SystemRandom()
+            salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(16)])
+
+        saltstring = "$%s$%s" % (cryptmethod[hashtype], salt)
+        encrypted = crypt.crypt(password, saltstring)
+        return encrypted
+
+    return None
+
+def to_uuid(string):
+    return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string)))
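+
+# A hedged usage sketch of the new filters (outputs vary with salt/runtime;
+# values invented for illustration):
+#
+#   {{ 'secret' | password_hash('sha512') }}  -> '$6$<random salt>$<hash>'
+#   {{ 'ansible' | hash('sha1') }}            -> hex sha1 digest of 'ansible'
+#   {{ 'www.example.com' | to_uuid }}         -> stable UUIDv5 in the
+#                                                UUID_NAMESPACE_ANSIBLE namespace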

 class FilterModule(object):
     ''' Ansible core jinja2 filters '''

     def filters(self):
         return {
             # base 64
-            'b64decode': base64.b64decode,
-            'b64encode': base64.b64encode,
+            'b64decode': partial(unicode_wrap, base64.b64decode),
+            'b64encode': partial(unicode_wrap, base64.b64encode),
+
+            # uuid
+            'to_uuid': to_uuid,

             # json
             'to_json': to_json,
@@ -256,11 +329,11 @@ class FilterModule(object):
             'from_yaml': yaml.safe_load,

             # path
-            'basename': os.path.basename,
-            'dirname': os.path.dirname,
-            'expanduser': os.path.expanduser,
-            'realpath': os.path.realpath,
-            'relpath': os.path.relpath,
+            'basename': partial(unicode_wrap, os.path.basename),
+            'dirname': partial(unicode_wrap, os.path.dirname),
+            'expanduser': partial(unicode_wrap, os.path.expanduser),
+            'realpath': partial(unicode_wrap, os.path.realpath),
+            'relpath': partial(unicode_wrap, os.path.relpath),

             # failure testing
             'failed' : failed,
@@ -281,8 +354,16 @@ class FilterModule(object):
             # quote string for shell usage
             'quote': quote,

+            # hash filters
             # md5 hex digest of string
             'md5': md5s,
+            # sha1 hex digest of string
+            'sha1': checksum_s,
+            # checksum of string as used by ansible for checksumming files
+            'checksum': checksum_s,
+            # generic hashing
+            'password_hash': get_encrypted_password,
+            'hash': get_hash,

             # file glob
             'fileglob': fileglob,
@@ -305,6 +386,7 @@ class FilterModule(object):

             # version comparison
             'version_compare': version_compare,

-            # random numbers
+            # random stuff
             'random': rand,
+            'shuffle': randomize_list,
         }
diff --git a/lib/ansible/runner/filter_plugins/ipaddr.py b/lib/ansible/runner/filter_plugins/ipaddr.py
new file mode 100644
index 00000000000..bcb19b16fde
--- /dev/null
+++ b/lib/ansible/runner/filter_plugins/ipaddr.py
@@ -0,0 +1,626 @@
+# (c) 2014, Maciej Delmanowski
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+from functools import partial
+
+try:
+    import netaddr
+except ImportError:
+    # in this case, we'll make the filters return error messages (see bottom)
+    netaddr = None
+else:
+    class mac_linux(netaddr.mac_unix):
+        pass
+    mac_linux.word_fmt = '%.2x'
+
+from ansible import errors
+
+
+# ---- IP address and network query helpers ----
+
+def _empty_ipaddr_query(v, vtype):
+    # We don't have any query to process, so just check what type the user
+    # expects, and return the IP address in a correct format
+    if v:
+        if vtype == 'address':
+            return str(v.ip)
+        elif vtype == 'network':
+            return str(v)
+
+def _6to4_query(v, vtype, value):
+    if v.version == 4:
+
+        if v.size == 1:
+            ipconv = str(v.ip)
+        elif v.size > 1:
+            if v.ip != v.network:
+                ipconv = str(v.ip)
+            else:
+                ipconv = False
+
+        if ipaddr(ipconv, 'public'):
+            numbers = list(map(int, ipconv.split('.')))
+
+        try:
+            return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
+        except:
+            return False
+
+    elif v.version == 6:
+        if vtype == 'address':
+            if ipaddr(str(v), '2002::/16'):
+                return value
+        elif vtype == 'network':
+            if v.ip != v.network:
+                if ipaddr(str(v.ip), '2002::/16'):
+                    return value
+            else:
+                return False
+
+def _ip_query(v):
+    if v.size == 1:
+        return str(v.ip)
+    if v.size > 1:
+        if v.ip != v.network:
+            return str(v.ip)
+
+def _gateway_query(v):
+    if v.size > 1:
+        if v.ip != v.network:
+            return str(v.ip) + '/' + str(v.prefixlen)
+
+def _bool_ipaddr_query(v):
+    if v:
+        return True
+
+def _broadcast_query(v):
+    if v.size > 1:
+        return str(v.broadcast)
+
+def _cidr_query(v):
+    return str(v)
+
+def _cidr_lookup_query(v, iplist, value):
+    try:
+        if v in iplist:
+            return value
+    except:
+        return False
+
+def _host_query(v):
+    if v.size == 1:
+        return str(v)
+    elif v.size > 1:
+        if v.ip != v.network:
+            return str(v.ip) + '/' + str(v.prefixlen)
+
+def _hostmask_query(v):
+    return str(v.hostmask)
+
+def _int_query(v, vtype):
+    if vtype == 'address':
+        return int(v.ip)
+    elif vtype == 'network':
+        return str(int(v.ip)) + '/' + str(int(v.prefixlen))
+
+def _ipv4_query(v, value):
+    if v.version == 6:
+        try:
+            return str(v.ipv4())
+        except:
+            return False
+    else:
+        return
value + +def _ipv6_query(v, value): + if v.version == 4: + return str(v.ipv6()) + else: + return value + +def _link_local_query(v, value): + v_ip = netaddr.IPAddress(str(v.ip)) + if v.version == 4: + if ipaddr(str(v_ip), '169.254.0.0/24'): + return value + + elif v.version == 6: + if ipaddr(str(v_ip), 'fe80::/10'): + return value + +def _loopback_query(v, value): + v_ip = netaddr.IPAddress(str(v.ip)) + if v_ip.is_loopback(): + return value + +def _multicast_query(v, value): + if v.is_multicast(): + return value + +def _net_query(v): + if v.size > 1: + if v.ip == v.network: + return str(v.network) + '/' + str(v.prefixlen) + +def _netmask_query(v): + if v.size > 1: + return str(v.netmask) + +def _network_query(v): + if v.size > 1: + return str(v.network) + +def _prefix_query(v): + return int(v.prefixlen) + +def _private_query(v, value): + if v.is_private(): + return value + +def _public_query(v, value): + v_ip = netaddr.IPAddress(str(v.ip)) + if v_ip.is_unicast() and not v_ip.is_private() and \ + not v_ip.is_loopback() and not v_ip.is_netmask() and \ + not v_ip.is_hostmask(): + return value + +def _revdns_query(v): + v_ip = netaddr.IPAddress(str(v.ip)) + return v_ip.reverse_dns + +def _size_query(v): + return v.size + +def _subnet_query(v): + return str(v.cidr) + +def _type_query(v): + if v.size == 1: + return 'address' + if v.size > 1: + if v.ip != v.network: + return 'address' + else: + return 'network' + +def _unicast_query(v, value): + if v.is_unicast(): + return value + +def _version_query(v): + return v.version + +def _wrap_query(v, vtype, value): + if v.version == 6: + if vtype == 'address': + return '[' + str(v.ip) + ']' + elif vtype == 'network': + return '[' + str(v.ip) + ']/' + str(v.prefixlen) + else: + return value + + +# ---- HWaddr query helpers ---- +def _bare_query(v): + v.dialect = netaddr.mac_bare + return str(v) + +def _bool_hwaddr_query(v): + if v: + return True + +def _cisco_query(v): + v.dialect = netaddr.mac_cisco + return str(v) + +def _empty_hwaddr_query(v, value): + if v: + return value + +def _linux_query(v): + v.dialect = mac_linux + return str(v) + +def _postgresql_query(v): + v.dialect = netaddr.mac_pgsql + return str(v) + +def _unix_query(v): + v.dialect = netaddr.mac_unix + return str(v) + +def _win_query(v): + v.dialect = netaddr.mac_eui48 + return str(v) + + +# ---- IP address and network filters ---- + +def ipaddr(value, query = '', version = False, alias = 'ipaddr'): + ''' Check if string is an IP address or network and filter it ''' + + query_func_extra_args = { + '': ('vtype',), + '6to4': ('vtype', 'value'), + 'cidr_lookup': ('iplist', 'value'), + 'int': ('vtype',), + 'ipv4': ('value',), + 'ipv6': ('value',), + 'link-local': ('value',), + 'loopback': ('value',), + 'lo': ('value',), + 'multicast': ('value',), + 'private': ('value',), + 'public': ('value',), + 'unicast': ('value',), + 'wrap': ('vtype', 'value'), + } + query_func_map = { + '': _empty_ipaddr_query, + '6to4': _6to4_query, + 'address': _ip_query, + 'address/prefix': _gateway_query, + 'bool': _bool_ipaddr_query, + 'broadcast': _broadcast_query, + 'cidr': _cidr_query, + 'cidr_lookup': _cidr_lookup_query, + 'gateway': _gateway_query, + 'gw': _gateway_query, + 'host': _host_query, + 'host/prefix': _gateway_query, + 'hostmask': _hostmask_query, + 'hostnet': _gateway_query, + 'int': _int_query, + 'ip': _ip_query, + 'ipv4': _ipv4_query, + 'ipv6': _ipv6_query, + 'link-local': _link_local_query, + 'lo': _loopback_query, + 'loopback': _loopback_query, + 'multicast': _multicast_query, + 'net': 
_net_query,
+        'netmask': _netmask_query,
+        'network': _network_query,
+        'prefix': _prefix_query,
+        'private': _private_query,
+        'public': _public_query,
+        'revdns': _revdns_query,
+        'router': _gateway_query,
+        'size': _size_query,
+        'subnet': _subnet_query,
+        'type': _type_query,
+        'unicast': _unicast_query,
+        'v4': _ipv4_query,
+        'v6': _ipv6_query,
+        'version': _version_query,
+        'wrap': _wrap_query,
+    }
+
+    vtype = None
+
+    if not value:
+        return False
+
+    elif value == True:
+        return False
+
+    # Check if value is a list and parse each element
+    elif isinstance(value, (list, tuple)):
+
+        _ret = []
+        for element in value:
+            if ipaddr(element, str(query), version):
+                _ret.append(ipaddr(element, str(query), version))
+
+        if _ret:
+            return _ret
+        else:
+            return list()
+
+    # Check if value is a number and convert it to an IP address
+    elif str(value).isdigit():
+
+        # We don't know what IP version to assume, so let's check IPv4 first,
+        # then IPv6
+        try:
+            if ((not version) or (version and version == 4)):
+                v = netaddr.IPNetwork('0.0.0.0/0')
+                v.value = int(value)
+                v.prefixlen = 32
+            elif version and version == 6:
+                v = netaddr.IPNetwork('::/0')
+                v.value = int(value)
+                v.prefixlen = 128
+
+        # IPv4 didn't work the first time, so it definitely has to be IPv6
+        except:
+            try:
+                v = netaddr.IPNetwork('::/0')
+                v.value = int(value)
+                v.prefixlen = 128
+
+            # The value is too big for IPv6. Are you a nanobot?
+            except:
+                return False
+
+        # We got an IP address, let's mark it as such
+        value = str(v)
+        vtype = 'address'
+
+    # value has not been recognized, check if it's a valid IP string
+    else:
+        try:
+            v = netaddr.IPNetwork(value)
+
+            # value is a valid IP string, check if user specified
+            # CIDR prefix or just an IP address, this will indicate default
+            # output format
+            try:
+                address, prefix = value.split('/')
+                vtype = 'network'
+            except:
+                vtype = 'address'
+
+        # value hasn't been recognized, maybe it's a numerical CIDR?
+        except:
+            try:
+                address, prefix = value.split('/')
+                address.isdigit()
+                address = int(address)
+                prefix.isdigit()
+                prefix = int(prefix)
+
+            # It's not numerical CIDR, give up
+            except:
+                return False
+
+            # It is something, so let's try and build a CIDR from the parts
+            try:
+                v = netaddr.IPNetwork('0.0.0.0/0')
+                v.value = address
+                v.prefixlen = prefix
+
+            # It's not a valid IPv4 CIDR
+            except:
+                try:
+                    v = netaddr.IPNetwork('::/0')
+                    v.value = address
+                    v.prefixlen = prefix
+
+                # It's not a valid IPv6 CIDR. Give up.
+                except:
+                    return False
+
+            # We have a valid CIDR, so let's write it in correct format
+            value = str(v)
+            vtype = 'network'
+
+    # We have a query string but it's not in the known query types. Check if
+    # that string is a valid subnet, if so, we can check later if given IP
+    # address/network is inside that specific subnet
+    try:
+        ### ?? 6to4 and link-local were True here before. Should they still?
+        if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'):
+            iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
+            query = 'cidr_lookup'
+    except:
+        pass
+
+    # This code checks if value matches the IP version the user wants, i.e. if
+    # it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()")
+    # If version does not match, return False
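+    #
+    # A hedged sketch of the resulting behaviour (addresses invented):
+    #
+    #   {{ '192.168.0.1/24' | ipaddr('address') }}  -> '192.168.0.1'
+    #   {{ '192.168.0.1/24' | ipv4 }}               -> '192.168.0.1/24'
+    #   {{ '192.168.0.1/24' | ipv6 }}               -> False  (version mismatch)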
+    if version and v.version != version:
+        return False
+
+    extras = []
+    for arg in query_func_extra_args.get(query, tuple()):
+        extras.append(locals()[arg])
+    try:
+        return query_func_map[query](v, *extras)
+    except KeyError:
+        try:
+            float(query)
+            if v.size == 1:
+                if vtype == 'address':
+                    return str(v.ip)
+                elif vtype == 'network':
+                    return str(v)
+
+            elif v.size > 1:
+                try:
+                    return str(v[query]) + '/' + str(v.prefixlen)
+                except:
+                    return False
+
+            else:
+                return value
+
+        except:
+            raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
+
+    return False
+
+
+def ipwrap(value, query = ''):
+    try:
+        if isinstance(value, (list, tuple)):
+            _ret = []
+            for element in value:
+                if ipaddr(element, query, version = False, alias = 'ipwrap'):
+                    _ret.append(ipaddr(element, 'wrap'))
+                else:
+                    _ret.append(element)
+
+            return _ret
+        else:
+            _ret = ipaddr(value, query, version = False, alias = 'ipwrap')
+            if _ret:
+                return ipaddr(_ret, 'wrap')
+            else:
+                return value
+
+    except:
+        return value
+
+
+def ipv4(value, query = ''):
+    return ipaddr(value, query, version = 4, alias = 'ipv4')
+
+
+def ipv6(value, query = ''):
+    return ipaddr(value, query, version = 6, alias = 'ipv6')
+
+
+# Split given subnet into smaller subnets or find out the biggest subnet of
+# a given IP address with given CIDR prefix
+# Usage:
+#
+#   - address or address/prefix | ipsubnet
+#     returns CIDR subnet of a given input
+#
+#   - address/prefix | ipsubnet(cidr)
+#     returns number of possible subnets for given CIDR prefix
+#
+#   - address/prefix | ipsubnet(cidr, index)
+#     returns new subnet with given CIDR prefix
+#
+#   - address | ipsubnet(cidr)
+#     returns biggest subnet with given CIDR prefix that address belongs to
+#
+#   - address | ipsubnet(cidr, index)
+#     returns next indexed subnet which contains given address
+def ipsubnet(value, query = '', index = 'x'):
+    ''' Manipulate IPv4/IPv6 subnets '''
+
+    try:
+        vtype = ipaddr(value, 'type')
+        if vtype == 'address':
+            v = ipaddr(value, 'cidr')
+        elif vtype == 'network':
+            v = ipaddr(value, 'subnet')
+
+        value = netaddr.IPNetwork(v)
+    except:
+        return False
+
+    if not query:
+        return str(value)
+
+    elif str(query).isdigit():
+        vsize = ipaddr(v, 'size')
+        query = int(query)
+
+        try:
+            float(index)
+            index = int(index)
+
+            if vsize > 1:
+                try:
+                    return str(list(value.subnet(query))[index])
+                except:
+                    return False
+
+            elif vsize == 1:
+                try:
+                    return str(value.supernet(query)[index])
+                except:
+                    return False
+
+        except:
+            if vsize > 1:
+                try:
+                    return str(len(list(value.subnet(query))))
+                except:
+                    return False
+
+            elif vsize == 1:
+                try:
+                    return str(value.supernet(query)[0])
+                except:
+                    return False
+
+    return False
+
+
+# ---- HWaddr / MAC address filters ----
+
+def hwaddr(value, query = '', alias = 'hwaddr'):
+    ''' Check if string is a HW/MAC address and filter it '''
+
+    query_func_extra_args = {
+        '': ('value',),
+    }
+    query_func_map = {
+        '': _empty_hwaddr_query,
+        'bare': _bare_query,
+        'bool': _bool_hwaddr_query,
+        'cisco': _cisco_query,
+        'eui48': _win_query,
+        'linux': _linux_query,
+        'pgsql': _postgresql_query,
+        'postgresql': _postgresql_query,
+        'psql': _postgresql_query,
+        'unix': _unix_query,
+        'win': _win_query,
+    }
+
+    try:
+        v = netaddr.EUI(value)
+    except:
+        if query and query != 'bool':
+            raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
+
+    extras = []
+    for arg in query_func_extra_args.get(query,
tuple()): + extras.append(locals()[arg]) + try: + return query_func_map[query](v, *extras) + except KeyError: + raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query) + + return False + +def macaddr(value, query = ''): + return hwaddr(value, query, alias = 'macaddr') + +def _need_netaddr(f_name, *args, **kwargs): + raise errors.AnsibleFilterError('The {0} filter requires python-netaddr be' + ' installed on the ansible controller'.format(f_name)) + +# ---- Ansible filters ---- + +class FilterModule(object): + ''' IP address and network manipulation filters ''' + filter_map = { + # IP addresses and networks + 'ipaddr': ipaddr, + 'ipwrap': ipwrap, + 'ipv4': ipv4, + 'ipv6': ipv6, + 'ipsubnet': ipsubnet, + + # MAC / HW addresses + 'hwaddr': hwaddr, + 'macaddr': macaddr + } + + def filters(self): + if netaddr: + return self.filter_map + else: + # Need to install python-netaddr for these filters to work + return dict((f, partial(_need_netaddr, f)) for f in self.filter_map) diff --git a/lib/ansible/runner/filter_plugins/math.py b/lib/ansible/runner/filter_plugins/math.py new file mode 100644 index 00000000000..7f6cc195556 --- /dev/null +++ b/lib/ansible/runner/filter_plugins/math.py @@ -0,0 +1,69 @@ +# (c) 2014, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import absolute_import + +import math +from ansible import errors + +def isnotanumber(x): + try: + return math.isnan(x) + except TypeError: + return False + + +def logarithm(x, base=math.e): + try: + if base == 10: + return math.log10(x) + else: + return math.log(x, base) + except TypeError, e: + raise errors.AnsibleFilterError('log() can only be used on numbers: %s' % str(e)) + + +def power(x, y): + try: + return math.pow(x, y) + except TypeError, e: + raise errors.AnsibleFilterError('pow() can only be used on numbers: %s' % str(e)) + + +def inversepower(x, base=2): + try: + if base == 2: + return math.sqrt(x) + else: + return math.pow(x, 1.0/float(base)) + except TypeError, e: + raise errors.AnsibleFilterError('root() can only be used on numbers: %s' % str(e)) + + +class FilterModule(object): + ''' Ansible math jinja2 filters ''' + + def filters(self): + return { + # general math + 'isnan': isnotanumber, + + # exponents and logarithms + 'log': logarithm, + 'pow': power, + 'root': inversepower, + } diff --git a/lib/ansible/runner/lookup_plugins/consul_kv.py b/lib/ansible/runner/lookup_plugins/consul_kv.py new file mode 100755 index 00000000000..522fa8deb7d --- /dev/null +++ b/lib/ansible/runner/lookup_plugins/consul_kv.py @@ -0,0 +1,128 @@ +# (c) 2015, Steve Gargan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+'''
+Lookup plugin to grab metadata from a consul key value store.
+============================================================
+
+Plugin will look up metadata for a playbook from the key value store in a
+consul cluster. Values can be easily set in the kv store with simple REST
+commands, e.g.:
+
+curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata
+
+this can then be looked up in a playbook as follows:
+
+- debug: msg='key contains {{item}}'
+  with_consul_kv:
+    - 'key/to/retrieve'
+
+
+Parameters can be provided after the key to be more specific about what to retrieve, e.g.:
+
+- debug: msg='key contains {{item}}'
+  with_consul_kv:
+    - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
+
+recurse: if true, will retrieve all the values that have the given key as prefix
+index:   if the key has a value with the specified index then this is returned
+         allowing access to historical values.
+token:   ACL token to allow access to restricted values.
+
+By default this will look up keys via the consul agent running on http://localhost:8500;
+this can be changed by setting the env variable 'ANSIBLE_CONSUL_URL' to point to the URL
+of the kv store you'd like to use.
+
+'''
+
+######################################################################
+
+import os
+import sys
+from urlparse import urlparse
+from ansible import utils, errors
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+try:
+    import consul
+except ImportError, e:
+    print "failed=True msg='python-consul required for this module. "\
+        "see http://python-consul.readthedocs.org/en/latest/#installation'"
+    sys.exit(1)
+
+
+class LookupModule(object):
+
+    def __init__(self, basedir=None, **kwargs):
+
+        self.basedir = basedir
+        self.agent_url = 'http://localhost:8500'
+        if os.getenv('ANSIBLE_CONSUL_URL') is not None:
+            self.agent_url = os.environ['ANSIBLE_CONSUL_URL']
+
+    def run(self, terms, inject=None, **kwargs):
+
+        u = urlparse(self.agent_url)
+        consul_api = consul.Consul(host=u.hostname, port=u.port)
+
+        values = []
+        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+        try:
+            for term in terms:
+                params = self.parse_params(term)
+                results = consul_api.kv.get(params['key'],
+                                            token=params['token'],
+                                            index=params['index'],
+                                            recurse=params['recurse'])
+                if results[1]:
+                    # responds with a single or list of result maps
+                    if isinstance(results[1], list):
+                        for r in results[1]:
+                            values.append(r['Value'])
+                    else:
+                        values.append(results[1]['Value'])
+        except Exception, e:
+            raise errors.AnsibleError(
+                "Error locating '%s' in kv store. Error was %s" % (term, e))
+
+        return values
+
+    def parse_params(self, term):
+        params = term.split(' ')
+
+        paramvals = {
+            'key': params[0],
+            'token': None,
+            'recurse': False,
+            'index': None
+        }
+
+        # parameters specified?
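+        # A hedged sketch of the expected parsing (token value invented):
+        #
+        #   parse_params("key/to/retrieve recurse=true token=SOME-ACL-TOKEN")
+        #   => {'key': 'key/to/retrieve', 'token': 'SOME-ACL-TOKEN',
+        #       'recurse': 'true', 'index': None}
+        #
+        # note that parsed values arrive as strings; consul_api.kv.get()
+        # is left to interpret them.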
+        try:
+            for param in params[1:]:
+                if param and len(param) > 0:
+                    name, value = param.split('=')
+                    assert name in paramvals, "%s is not a valid consul lookup parameter" % name
+                    paramvals[name] = value
+        except (ValueError, AssertionError), e:
+            raise errors.AnsibleError(e)
+
+        return paramvals
diff --git a/lib/ansible/runner/lookup_plugins/first_found.py b/lib/ansible/runner/lookup_plugins/first_found.py
index d394ee3a2fe..a48b56a3c28 100644
--- a/lib/ansible/runner/lookup_plugins/first_found.py
+++ b/lib/ansible/runner/lookup_plugins/first_found.py
@@ -172,14 +172,21 @@ class LookupModule(object):
         else:
             total_search = terms

-        result = None
         for fn in total_search:
+            if inject and '_original_file' in inject:
+                # check the templates and vars directories too,
+                # if they exist
+                for roledir in ('templates', 'vars'):
+                    path = utils.path_dwim(os.path.join(self.basedir, '..', roledir), fn)
+                    if os.path.exists(path):
+                        return [path]
+            # if none of the above were found, just check the
+            # current filename against the basedir (this will already
+            # have ../files from runner, if it's a role task)
             path = utils.path_dwim(self.basedir, fn)
             if os.path.exists(path):
                 return [path]
-
-
-        if not result:
+        else:
             if skip:
                 return []
             else:
diff --git a/lib/ansible/runner/lookup_plugins/flattened.py b/lib/ansible/runner/lookup_plugins/flattened.py
index 831b2e91302..6d9dd613be0 100644
--- a/lib/ansible/runner/lookup_plugins/flattened.py
+++ b/lib/ansible/runner/lookup_plugins/flattened.py
@@ -59,7 +59,7 @@ class LookupModule(object):
                 # if it's a list, check recursively for items that are a list
                 term = self.flatten(term, inject)
                 ret.extend(term)
-            else:
+            else:
                 ret.append(term)

         return ret
diff --git a/lib/ansible/runner/lookup_plugins/url.py b/lib/ansible/runner/lookup_plugins/url.py
new file mode 100644
index 00000000000..37a1df6c7af
--- /dev/null
+++ b/lib/ansible/runner/lookup_plugins/url.py
@@ -0,0 +1,48 @@
+# (c) 2015, Brian Coca
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
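+
+# A hedged usage sketch (URL invented for illustration):
+#
+#   - debug: msg="line: {{ item }}"
+#     with_url:
+#       - 'https://example.com/hosts.txt'
+#
+# each line of the fetched document becomes one item.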
+
+from ansible import utils
+import urllib2
+
+class LookupModule(object):
+
+    def __init__(self, basedir=None, **kwargs):
+        self.basedir = basedir
+
+    def run(self, terms, inject=None, **kwargs):
+
+        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+
+        if isinstance(terms, basestring):
+            terms = [ terms ]
+
+        ret = []
+        for term in terms:
+            try:
+                r = urllib2.Request(term)
+                response = urllib2.urlopen(r)
+            except urllib2.HTTPError, e:
+                utils.warning("Received HTTP error for %s : %s" % (term, str(e)))
+                continue
+            except urllib2.URLError, e:
+                utils.warning("Failed lookup url for %s : %s" % (term, str(e)))
+                continue
+
+            for line in response.read().splitlines():
+                ret.append(line)
+
+        return ret
diff --git a/lib/ansible/runner/shell_plugins/powershell.py b/lib/ansible/runner/shell_plugins/powershell.py
index 7254df6f7ea..50b759ae633 100644
--- a/lib/ansible/runner/shell_plugins/powershell.py
+++ b/lib/ansible/runner/shell_plugins/powershell.py
@@ -53,9 +53,11 @@ def _encode_script(script, as_list=False):
         return cmd_parts
     return ' '.join(cmd_parts)

-def _build_file_cmd(cmd_parts):
+def _build_file_cmd(cmd_parts, quote_args=True):
     '''Build command line to run a file, given list of file name plus args.'''
-    return ' '.join(_common_args + ['-ExecutionPolicy', 'Unrestricted', '-File'] + ['"%s"' % x for x in cmd_parts])
+    if quote_args:
+        cmd_parts = ['"%s"' % x for x in cmd_parts]
+    return ' '.join(['&'] + cmd_parts)

 class ShellModule(object):

@@ -84,12 +86,24 @@ class ShellModule(object):
         # FIXME: Support system temp path!
         return _encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile)

-    def md5(self, path):
+    def expand_user(self, user_home_path):
+        # PowerShell only supports "~" (not "~username"). Resolve-Path ~ does
+        # not seem to work remotely, though by default we are always starting
+        # in the user's home directory.
+        if user_home_path == '~':
+            script = 'Write-Host (Get-Location).Path'
+        elif user_home_path.startswith('~\\'):
+            script = 'Write-Host ((Get-Location).Path + "%s")' % _escape(user_home_path[1:])
+        else:
+            script = 'Write-Host "%s"' % _escape(user_home_path)
+        return _encode_script(script)
+
+    def checksum(self, path, python_interp):
         path = _escape(path)
         script = '''
             If (Test-Path -PathType Leaf "%(path)s")
             {
-                $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider;
+                $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
                 $fp = [System.IO.File]::Open("%(path)s", [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
                 [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
                 $fp.Dispose();
@@ -110,7 +124,7 @@ class ShellModule(object):
         cmd_parts = shlex.split(cmd, posix=False)
         if not cmd_parts[0].lower().endswith('.ps1'):
             cmd_parts[0] = '%s.ps1' % cmd_parts[0]
-        script = _build_file_cmd(cmd_parts)
+        script = _build_file_cmd(cmd_parts, quote_args=False)
         if rm_tmp:
             rm_tmp = _escape(rm_tmp)
             script = '%s; Remove-Item "%s" -Force -Recurse;' % (script, rm_tmp)
diff --git a/lib/ansible/runner/shell_plugins/sh.py b/lib/ansible/runner/shell_plugins/sh.py
index 1ee225830b5..d8f1efeb128 100644
--- a/lib/ansible/runner/shell_plugins/sh.py
+++ b/lib/ansible/runner/shell_plugins/sh.py
@@ -16,9 +16,12 @@
 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os +import re import pipes import ansible.constants as C +_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$') + class ShellModule(object): def env_prefix(self, **kwargs): @@ -59,24 +62,48 @@ class ShellModule(object): cmd += ' && echo %s' % basetmp return cmd - def md5(self, path): - path = pipes.quote(path) + def expand_user(self, user_home_path): + ''' Return a command to expand tildes in a path + + It can be either "~" or "~username". We use the POSIX definition of + a username: + http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426 + http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276 + ''' + + # Check that the user_path to expand is safe + if user_home_path != '~': + if not _USER_HOME_PATH_RE.match(user_home_path): + # pipes.quote will make the shell return the string verbatim + user_home_path = pipes.quote(user_home_path) + return 'echo %s' % user_home_path + + def checksum(self, path, python_interp): # The following test needs to be SH-compliant. BASH-isms will # not work if /bin/sh points to a non-BASH shell. - test = "rc=0; [ -r \"%s\" ] || rc=2; [ -f \"%s\" ] || rc=1; [ -d \"%s\" ] && echo 3 && exit 0" % ((path,) * 3) - md5s = [ - "(/usr/bin/md5sum %s 2>/dev/null)" % path, # Linux - "(/sbin/md5sum -q %s 2>/dev/null)" % path, # ? - "(/usr/bin/digest -a md5 %s 2>/dev/null)" % path, # Solaris 10+ - "(/sbin/md5 -q %s 2>/dev/null)" % path, # Freebsd - "(/usr/bin/md5 -n %s 2>/dev/null)" % path, # Netbsd - "(/bin/md5 -q %s 2>/dev/null)" % path, # Openbsd - "(/usr/bin/csum -h MD5 %s 2>/dev/null)" % path, # AIX - "(/bin/csum -h MD5 %s 2>/dev/null)" % path # AIX also + # + # In the following test, each condition is a check and logical + # comparison (|| or &&) that sets the rc value. Every check is run so + # the last check in the series to fail will be the rc that is + # returned. + # + # If a check fails we error before invoking the hash functions because + # hash functions may successfully take the hash of a directory on BSDs + # (UFS filesystem?) which is not what the rest of the ansible code + # expects + # + # If all of the available hashing methods fail we fail with an rc of + # 0. This logic is added to the end of the cmd at the bottom of this + # function. 
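+    # Worked example (hypothetical path): for a missing /tmp/x the -r test
+    # fails (rc=2), the -f test then fails (rc=1), the -d test leaves rc
+    # alone and the interpreter check passes, so the command below echoes
+    # "1 /tmp/x" and exits before any hashing is attempted.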
+ + test = "rc=flag; [ -r \'%(p)s\' ] || rc=2; [ -f \'%(p)s\' ] || rc=1; [ -d \'%(p)s\' ] && rc=3; %(i)s -V 2>/dev/null || rc=4; [ x\"$rc\" != \"xflag\" ] && echo \"${rc}\"\' %(p)s\' && exit 0" % dict(p=path, i=python_interp) + csums = [ + "(%s -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();\nafile = open(\"%s\", \"rb\")\nbuf = afile.read(BLOCKSIZE)\nwhile len(buf) > 0:\n\thasher.update(buf)\n\tbuf = afile.read(BLOCKSIZE)\nafile.close()\nprint(hasher.hexdigest())' 2>/dev/null)" % (python_interp, path), # Python > 2.4 (including python3) + "(%s -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();\nafile = open(\"%s\", \"rb\")\nbuf = afile.read(BLOCKSIZE)\nwhile len(buf) > 0:\n\thasher.update(buf)\n\tbuf = afile.read(BLOCKSIZE)\nafile.close()\nprint(hasher.hexdigest())' 2>/dev/null)" % (python_interp, path), # Python == 2.4 ] - cmd = " || ".join(md5s) - cmd = "%s; %s || (echo \"${rc} %s\")" % (test, cmd, path) + cmd = " || ".join(csums) + cmd = "%s; %s || (echo \'0 %s\')" % (test, cmd, path) return cmd def build_module_command(self, env_string, shebang, cmd, rm_tmp=None): diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 6da1f9a0301..8394a8f4f96 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -29,8 +29,10 @@ from ansible import __version__ from ansible.utils.display_functions import * from ansible.utils.plugins import * from ansible.utils.su_prompts import * +from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s from ansible.callbacks import display from ansible.module_utils.splitter import split_args, unquote +from ansible.module_utils.basic import heuristic_log_sanitize import ansible.constants as C import ast import time @@ -45,7 +47,6 @@ import warnings import traceback import getpass import sys -import json import subprocess import contextlib @@ -63,14 +64,10 @@ CODE_REGEX = re.compile(r'(?:{%|%})') try: - import json -except ImportError: + # simplejson can be much faster if it's available import simplejson as json - -try: - from hashlib import md5 as _md5 except ImportError: - from md5 import md5 as _md5 + import json PASSLIB_AVAILABLE = False try: @@ -362,7 +359,7 @@ def repo_url_to_role_name(repo_url): # gets the role name out of a repo like # http://git.example.com/repos/repo.git" => "repo" - if '://' not in repo_url: + if '://' not in repo_url and '@' not in repo_url: return repo_url trailing_path = repo_url.split('/')[-1] if trailing_path.endswith('.git'): @@ -387,15 +384,12 @@ def role_spec_parse(role_spec): role_spec = role_spec.strip() role_version = '' + default_role_versions = dict(git='master', hg='tip') if role_spec == "" or role_spec.startswith("#"): return (None, None, None, None) tokens = [s.strip() for s in role_spec.split(',')] - if not tokens[0].endswith('.tar.gz'): - # pick a reasonable default branch - role_version = 'master' - # assume https://github.com URLs are git+https:// URLs and not # tarballs unless they end in '.zip' if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'): @@ -412,25 +406,53 @@ def role_spec_parse(role_spec): role_name = tokens[2] else: role_name = repo_url_to_role_name(tokens[0]) + if scm and not role_version: + role_version = default_role_versions.get(scm, '') return dict(scm=scm, src=role_url, version=role_version, name=role_name) def role_yaml_parse(role): - if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not 
role["src"].endswith('.tar.gz'): - role["src"] = "git+" + role["src"] - if '+' in role["src"]: - (scm, src) = role["src"].split('+') - role["scm"] = scm - role["src"] = src - if 'name' not in role: - role["name"] = repo_url_to_role_name(role["src"]) + if 'role' in role: + # Old style: {role: "galaxy.role,version,name", other_vars: "here" } + role_info = role_spec_parse(role['role']) + if isinstance(role_info, dict): + # Warning: Slight change in behaviour here. name may be being + # overloaded. Previously, name was only a parameter to the role. + # Now it is both a parameter to the role and the name that + # ansible-galaxy will install under on the local system. + if 'name' in role and 'name' in role_info: + del role_info['name'] + role.update(role_info) + else: + # New style: { src: 'galaxy.role,version,name', other_vars: "here" } + if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'): + role["src"] = "git+" + role["src"] + + if '+' in role["src"]: + (scm, src) = role["src"].split('+') + role["scm"] = scm + role["src"] = src + + if 'name' not in role: + role["name"] = repo_url_to_role_name(role["src"]) + + if 'version' not in role: + role['version'] = '' + + if 'scm' not in role: + role['scm'] = None + return role def json_loads(data): ''' parse a JSON string and return a data structure ''' + try: + loaded = json.loads(data) + except ValueError,e: + raise errors.AnsibleError("Unable to read provided data as JSON: %s" % str(e)) - return json.loads(data) + return loaded def _clean_data(orig_data, from_remote=False, from_inventory=False): ''' remove jinja2 template tags from a string ''' @@ -738,6 +760,11 @@ def parse_yaml_from_file(path, vault_password=None): vault = VaultLib(password=vault_password) if vault.is_encrypted(data): + # if the file is encrypted and no password was specified, + # the decrypt call would throw an error, but we check first + # since the decrypt function doesn't know the file name + if vault_password is None: + raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path) data = vault.decrypt(data) show_content = False @@ -794,45 +821,16 @@ def merge_hash(a, b): return result -def md5s(data): - ''' Return MD5 hex digest of data. ''' - - digest = _md5() - try: - digest.update(data) - except UnicodeEncodeError: - digest.update(data.encode('utf-8')) - return digest.hexdigest() - -def md5(filename): - ''' Return MD5 hex digest of local file, None if file is not present or a directory. ''' - - if not os.path.exists(filename) or os.path.isdir(filename): - return None - digest = _md5() - blocksize = 64 * 1024 - try: - infile = open(filename, 'rb') - block = infile.read(blocksize) - while block: - digest.update(block) - block = infile.read(blocksize) - infile.close() - except IOError, e: - raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e)) - return digest.hexdigest() - def default(value, function): ''' syntactic sugar around lazy evaluation of defaults ''' if value is None: return function() return value -def _gitinfo(): + +def _git_repo_info(repo_path): ''' returns a string containing git branch, commit id and commit date ''' result = None - repo_path = os.path.join(os.path.dirname(__file__), '..', '..', '..', '.git') - if os.path.exists(repo_path): # Check if the .git is a file. If it is a file, it means that we are in a submodule structure. 
        if os.path.isfile(repo_path):
@@ -842,7 +840,7 @@ def _gitinfo():
                if os.path.isabs(gitdir):
                    repo_path = gitdir
                else:
-                   repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
+                   repo_path = os.path.join(repo_path[:-4], gitdir)
            except (IOError, AttributeError):
                return ''
        f = open(os.path.join(repo_path, "HEAD"))
@@ -853,22 +851,50 @@ def _gitinfo():
            f = open(branch_path)
            commit = f.readline()[:10]
            f.close()
-           date = time.localtime(os.stat(branch_path).st_mtime)
-           if time.daylight == 0:
-               offset = time.timezone
-           else:
-               offset = time.altzone
-           result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
-               time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
+       else:
+           # detached HEAD
+           commit = branch[:10]
+           branch = 'detached HEAD'
+           branch_path = os.path.join(repo_path, "HEAD")
+
+       date = time.localtime(os.stat(branch_path).st_mtime)
+       if time.daylight == 0:
+           offset = time.timezone
+       else:
+           offset = time.altzone
+       result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
+           time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
    else:
        result = ''
    return result
+
+def _gitinfo():
+    basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
+    repo_path = os.path.join(basedir, '.git')
+    result = _git_repo_info(repo_path)
+    submodules = os.path.join(basedir, '.gitmodules')
+    if not os.path.exists(submodules):
+        return result
+    f = open(submodules)
+    for line in f:
+        tokens = line.strip().split(' ')
+        if tokens[0] == 'path':
+            submodule_path = tokens[2]
+            submodule_info = _git_repo_info(os.path.join(basedir, submodule_path, '.git'))
+            if not submodule_info:
+                submodule_info = ' not found - use git submodule update --init ' + submodule_path
+            result += "\n {0}: {1}".format(submodule_path, submodule_info)
+    f.close()
+    return result
+
+
 def version(prog):
     result = "{0} {1}".format(prog, __version__)
     gitinfo = _gitinfo()
     if gitinfo:
         result = result + " {0}".format(gitinfo)
+    result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
     return result

 def version_info(gitinfo=False):
@@ -911,34 +937,18 @@ def sanitize_output(str):

     private_keys = ['password', 'login_password']

-    filter_re = [
-        # filter out things like user:pass@foo/whatever
-        # and http://username:pass@wherever/foo
-        re.compile('^(?P<before>.*:)(?P<password>.*)(?P<after>\@.*)$'),
-    ]
+    parts = parse_kv(str)
+    output = []
+    for (k, v) in parts.items():
+        if k in private_keys:
+            output.append("%s=VALUE_HIDDEN" % k)
+            continue
+        else:
+            v = heuristic_log_sanitize(v)
+            output.append('%s=%s' % (k, v))
+    output = ' '.join(output)
+    return output

-    parts = str.split()
-    output = ''
-    for part in parts:
-        try:
-            (k,v) = part.split('=', 1)
-            if k in private_keys:
-                output += " %s=VALUE_HIDDEN" % k
-            else:
-                found = False
-                for filter in filter_re:
-                    m = filter.match(v)
-                    if m:
-                        d = m.groupdict()
-                        output += " %s=%s" % (k, d['before'] + "********" + d['after'])
-                        found = True
-                        break
-                if not found:
-                    output += " %s" % part
-        except:
-            output += " %s" % part
-
-    return output.strip()

 ####################################################################
 # option handling code for /usr/bin/ansible and ansible-playbook
@@ -968,6 +978,8 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
     parser.add_option('-i', '--inventory-file', dest='inventory',
         help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
         default=constants.DEFAULT_HOST_LIST)
+    parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
+        help="set additional variables as
key=value or YAML/JSON", default=[]) parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true', help='ask for SSH password') parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', @@ -1038,6 +1050,21 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, return parser +def parse_extra_vars(extra_vars_opts, vault_pass): + extra_vars = {} + for extra_vars_opt in extra_vars_opts: + extra_vars_opt = to_unicode(extra_vars_opt) + if extra_vars_opt.startswith(u"@"): + # Argument is a YAML file (JSON is a subset of YAML) + extra_vars = combine_vars(extra_vars, parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass)) + elif extra_vars_opt and extra_vars_opt[0] in u'[{': + # Arguments as YAML + extra_vars = combine_vars(extra_vars, parse_yaml(extra_vars_opt)) + else: + # Arguments as Key-value + extra_vars = combine_vars(extra_vars, parse_kv(extra_vars_opt)) + return extra_vars + def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False): vault_pass = None @@ -1144,7 +1171,7 @@ def boolean(value): else: return False -def make_sudo_cmd(sudo_user, executable, cmd): +def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd): """ helper function for connection plugins to create sudo commands """ @@ -1159,7 +1186,7 @@ def make_sudo_cmd(sudo_user, executable, cmd): prompt = '[sudo via ansible, key=%s] password: ' % randbits success_key = 'SUDO-SUCCESS-%s' % randbits sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % ( - C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_FLAGS, + sudo_exe, sudo_exe, C.DEFAULT_SUDO_FLAGS, prompt, sudo_user, executable or '$SHELL', pipes.quote('echo %s; %s' % (success_key, cmd))) return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key) @@ -1177,13 +1204,25 @@ def make_su_cmd(su_user, executable, cmd): ) return ('/bin/sh -c ' + pipes.quote(sudocmd), None, success_key) +# For v2, consider either using kitchen or copying my code from there for +# to_unicode and to_bytes handling (TEK) _TO_UNICODE_TYPES = (unicode, type(None)) def to_unicode(value): + # Use with caution -- this function is not encoding safe (non-utf-8 values + # will cause tracebacks if they contain bytes from 0x80-0xff inclusive) if isinstance(value, _TO_UNICODE_TYPES): return value return value.decode("utf-8") +def to_bytes(value): + # Note: value is assumed to be a basestring to mirror to_unicode. Better + # implementations (like kitchen.text.converters.to_bytes) bring that check + # into the function + if isinstance(value, str): + return value + return value.encode('utf-8') + def get_diff(diff): # called by --diff usage in playbook and runner via callbacks # include names in diffs 'before' and 'after' and do diff -U 10 diff --git a/lib/ansible/utils/hashing.py b/lib/ansible/utils/hashing.py new file mode 100644 index 00000000000..a7d142e5bd4 --- /dev/null +++ b/lib/ansible/utils/hashing.py @@ -0,0 +1,91 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import errors
+
+# Note, sha1 is the only hash algorithm compatible with python2.4 and with
+# FIPS-140 mode (as of 11-2014)
+try:
+    from hashlib import sha1 as sha1
+except ImportError:
+    from sha import sha as sha1
+
+# Backwards compat only
+try:
+    from hashlib import md5 as _md5
+except ImportError:
+    try:
+        from md5 import md5 as _md5
+    except ImportError:
+        # Assume we're running in FIPS mode here
+        _md5 = None
+
+def secure_hash_s(data, hash_func=sha1):
+    ''' Return a secure hash hex digest of data. '''
+
+    digest = hash_func()
+    try:
+        digest.update(data)
+    except UnicodeEncodeError:
+        digest.update(data.encode('utf-8'))
+    return digest.hexdigest()
+
+def secure_hash(filename, hash_func=sha1):
+    ''' Return a secure hash hex digest of local file, None if file is not present or a directory. '''
+
+    if not os.path.exists(filename) or os.path.isdir(filename):
+        return None
+    digest = hash_func()
+    blocksize = 64 * 1024
+    try:
+        infile = open(filename, 'rb')
+        block = infile.read(blocksize)
+        while block:
+            digest.update(block)
+            block = infile.read(blocksize)
+        infile.close()
+    except IOError, e:
+        raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
+    return digest.hexdigest()
+
+# The checksum algorithm must match with the algorithm in ShellModule.checksum() method
+checksum = secure_hash
+checksum_s = secure_hash_s
+
+# Backwards compat functions. Some modules include md5s in their return values
+# Continue to support that for now. As of ansible-1.8, all of those modules
+# should also return "checksum" (sha1 for now)
+# Do not use md5 unless it is needed for:
+# 1) Optional backwards compatibility
+# 2) Compliance with a third party protocol
+#
+# MD5 will not work on systems which are FIPS-140-2 compliant.
+
+def md5s(data):
+    if not _md5:
+        raise ValueError('MD5 not available. Possibly running in FIPS mode')
+    return secure_hash_s(data, _md5)
+
+def md5(filename):
+    if not _md5:
+        raise ValueError('MD5 not available. Possibly running in FIPS mode')
+    return secure_hash(filename, _md5)
+
diff --git a/lib/ansible/utils/module_docs_fragments/files.py b/lib/ansible/utils/module_docs_fragments/files.py
index d54d0ece5ab..adff1f2f1bf 100644
--- a/lib/ansible/utils/module_docs_fragments/files.py
+++ b/lib/ansible/utils/module_docs_fragments/files.py
@@ -67,4 +67,12 @@ options:
       - level part of the SELinux file context. This is the MLS/MCS attribute,
         sometimes known as the C(range). C(_default) feature works as for I(seuser).
+  follow:
+    required: false
+    default: "no"
+    choices: [ "yes", "no" ]
+    version_added: "1.8"
+    description:
+      - 'This flag indicates that filesystem links, if they exist, should be followed.'
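+      # A hypothetical task for illustration:
+      #     file: path=/etc/motd.link follow=yes mode=0644
+      # with follow=yes the mode is applied to the file the link points at,
+      # not to the symlink itself.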
+ """ diff --git a/lib/ansible/utils/plugins.py b/lib/ansible/utils/plugins.py index 327bc99ceca..29771d0ed97 100644 --- a/lib/ansible/utils/plugins.py +++ b/lib/ansible/utils/plugins.py @@ -75,6 +75,15 @@ class PluginLoader(object): ret.append(i) return os.pathsep.join(ret) + def _all_directories(self, dir): + results = [] + results.append(dir) + for root, subdirs, files in os.walk(dir): + if '__init__.py' in files: + for x in subdirs: + results.append(os.path.join(root,x)) + return results + def _get_package_paths(self): ''' Gets the path of a Python package ''' @@ -85,10 +94,8 @@ class PluginLoader(object): m = __import__(self.package) parts = self.package.split('.')[1:] self.package_path = os.path.join(os.path.dirname(m.__file__), *parts) - paths.append(self.package_path) - return paths - else: - return [ self.package_path ] + paths.extend(self._all_directories(self.package_path)) + return paths def _get_paths(self): ''' Return a list of paths to search for plugins in ''' @@ -96,12 +103,19 @@ class PluginLoader(object): if self._paths is not None: return self._paths - ret = [] - ret += self._extra_dirs + ret = self._extra_dirs[:] for basedir in _basedirs: fullpath = os.path.realpath(os.path.join(basedir, self.subdir)) if os.path.isdir(fullpath): + files = glob.glob("%s/*" % fullpath) + + # allow directories to be two levels deep + files2 = glob.glob("%s/*/*" % fullpath) + + if files2 is not None: + files.extend(files2) + for file in files: if os.path.isdir(file) and file not in ret: ret.append(file) @@ -109,56 +123,67 @@ class PluginLoader(object): ret.append(fullpath) # look in any configured plugin paths, allow one level deep for subcategories - configured_paths = self.config.split(os.pathsep) - for path in configured_paths: - path = os.path.realpath(os.path.expanduser(path)) - contents = glob.glob("%s/*" % path) - for c in contents: - if os.path.isdir(c) and c not in ret: - ret.append(c) - if path not in ret: - ret.append(path) + if self.config is not None: + configured_paths = self.config.split(os.pathsep) + for path in configured_paths: + path = os.path.realpath(os.path.expanduser(path)) + contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path) + for c in contents: + if os.path.isdir(c) and c not in ret: + ret.append(c) + if path not in ret: + ret.append(path) # look for any plugins installed in the package subtree ret.extend(self._get_package_paths()) + # cache and return the result self._paths = ret - return ret def add_directory(self, directory, with_subdir=False): ''' Adds an additional directory to the search path ''' - self._paths = None directory = os.path.realpath(directory) if directory is not None: if with_subdir: directory = os.path.join(directory, self.subdir) if directory not in self._extra_dirs: + # append the directory and invalidate the path cache self._extra_dirs.append(directory) + self._paths = None - def find_plugin(self, name, suffixes=None): + def find_plugin(self, name, suffixes=None, transport=''): ''' Find a plugin named name ''' if not suffixes: if self.class_name: suffixes = ['.py'] else: - suffixes = ['', '.ps1'] + if transport == 'winrm': + suffixes = ['.ps1', ''] + else: + suffixes = ['.py', ''] - for suffix in suffixes: - full_name = '%s%s' % (name, suffix) - if full_name in self._plugin_path_cache: - return self._plugin_path_cache[full_name] + # loop over paths and then loop over suffixes to find plugin + for i in self._get_paths(): + for suffix in suffixes: + full_name = '%s%s' % (name, suffix) + + if full_name in self._plugin_path_cache: 
+                    return self._plugin_path_cache[full_name]

-        for i in self._get_paths():
                 path = os.path.join(i, full_name)
                 if os.path.isfile(path):
                     self._plugin_path_cache[full_name] = path
                     return path

+        # if nothing is found, try finding alias/deprecated
+        if not name.startswith('_'):
+            return self.find_plugin('_' + name, suffixes, transport)
+
         return None

     def has_plugin(self, name):
@@ -232,7 +257,7 @@ shell_loader = PluginLoader(

 module_finder = PluginLoader(
     '',
-    '',
+    'ansible.modules',
     C.DEFAULT_MODULE_PATH,
     'library'
 )
diff --git a/lib/ansible/utils/su_prompts.py b/lib/ansible/utils/su_prompts.py
index f6641a43217..04e98e1c45e 100644
--- a/lib/ansible/utils/su_prompts.py
+++ b/lib/ansible/utils/su_prompts.py
@@ -53,7 +53,7 @@ SU_PROMPT_LOCALIZATIONS = [
     '密碼',
 ]

-SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join([x + ' ?: ?' for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE)
+SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join(['(\w+\'s )?' + x + ' ?: ?' for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE)

 def check_su_prompt(data):
     return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data))
diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py
index d1026590033..9556b8fcea8 100644
--- a/lib/ansible/utils/template.py
+++ b/lib/ansible/utils/template.py
@@ -33,6 +33,7 @@ import ast
 import traceback

 from ansible.utils.string_functions import count_newlines_from_end
+from ansible.utils import to_bytes

 class Globals(object):

@@ -86,47 +87,50 @@ JINJA2_ALLOWED_OVERRIDES = ['trim_blocks', 'lstrip_blocks', 'newline_sequence',
 def lookup(name, *args, **kwargs):
     from ansible import utils
     instance = utils.plugins.lookup_loader.get(name.lower(), basedir=kwargs.get('basedir',None))
-    vars = kwargs.get('vars', None)
+    tvars = kwargs.get('vars', None)

     if instance is not None:
-        # safely catch run failures per #5059
         try:
-            ran = instance.run(*args, inject=vars, **kwargs)
+            ran = instance.run(*args, inject=tvars, **kwargs)
+        except errors.AnsibleError:
+            raise
+        except jinja2.exceptions.UndefinedError, e:
+            raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e))
         except Exception, e:
-            ran = None
+            raise errors.AnsibleError('Unexpected error during lookup: %s' % e)
         if ran:
             ran = ",".join(ran)
         return ran
     else:
         raise errors.AnsibleError("lookup plugin (%s) not found" % name)

-def template(basedir, varname, vars, lookup_fatal=True, depth=0, expand_lists=True, convert_bare=False, fail_on_undefined=False, filter_fatal=True):
+def template(basedir, varname, templatevars, lookup_fatal=True, depth=0, expand_lists=True, convert_bare=False, fail_on_undefined=False, filter_fatal=True):
     ''' templates a data structure by traversing it and substituting for other data structures '''
     from ansible import utils

     try:
         if convert_bare and isinstance(varname, basestring):
             first_part = varname.split(".")[0].split("[")[0]
-            if first_part in vars and '{{' not in varname and '$' not in varname:
+            if first_part in templatevars and '{{' not in varname and '$' not in varname:
                 varname = "{{%s}}" % varname
-
+
         if isinstance(varname, basestring):
             if '{{' in varname or '{%' in varname:
-                varname = template_from_string(basedir, varname, vars, fail_on_undefined)
+                varname = template_from_string(basedir, varname, templatevars, fail_on_undefined)

                 if (varname.startswith("{") and not varname.startswith("{{")) or varname.startswith("["):
-                    eval_results = utils.safe_eval(varname, locals=vars, include_exceptions=True)
+                    eval_results = utils.safe_eval(varname, locals=templatevars, include_exceptions=True)
                     if eval_results[1] is None:
                         varname = eval_results[0]

             return varname
-
+
         elif isinstance(varname, (list, tuple)):
-            return [template(basedir, v, vars, lookup_fatal, depth, expand_lists, fail_on_undefined=fail_on_undefined) for v in varname]
+            return [template(basedir, v, templatevars, lookup_fatal, depth, expand_lists, convert_bare, fail_on_undefined, filter_fatal) for v in varname]
         elif isinstance(varname, dict):
             d = {}
             for (k, v) in varname.iteritems():
-                d[k] = template(basedir, v, vars, lookup_fatal, depth, expand_lists, fail_on_undefined=fail_on_undefined)
+                d[k] = template(basedir, v, templatevars, lookup_fatal, depth, expand_lists, convert_bare, fail_on_undefined, filter_fatal)
             return d
         else:
             return varname
@@ -166,6 +170,7 @@ class _jinja2_vars(object):
             return False

     def __getitem__(self, varname):
+        from ansible.runner import HostVars
         if varname not in self.vars:
             for i in self.extras:
                 if varname in i:
@@ -175,8 +180,9 @@ class _jinja2_vars(object):
             else:
                 raise KeyError("undefined variable: %s" % varname)
         var = self.vars[varname]
-        # HostVars is special, return it as-is
-        if isinstance(var, dict) and type(var) != dict:
+        # HostVars is special, return it as-is, as is the special variable
+        # 'vars', which contains the vars structure
+        if isinstance(var, dict) and varname == "vars" or isinstance(var, HostVars):
             return var
         else:
             return template(self.basedir, var, self.vars, fail_on_undefined=self.fail_on_undefined)
@@ -267,7 +273,7 @@ def template_from_file(basedir, path, vars, vault_password=None):
         managed_str = managed_default.format(
             host = vars['template_host'],
             uid = vars['template_uid'],
-            file = vars['template_path']
+            file = to_bytes(vars['template_path'])
         )
         vars['ansible_managed'] = time.strftime(
             managed_str,
@@ -338,6 +344,8 @@ def template_from_string(basedir, data, vars, fail_on_undefined=False):

     try:
         t = environment.from_string(data)
+    except TemplateSyntaxError, e:
+        raise errors.AnsibleError("template error while templating string: %s" % str(e))
     except Exception, e:
         if 'recursion' in str(e):
             raise errors.AnsibleError("recursive loop detected in template string: %s" % data)
diff --git a/lib/ansible/utils/unicode.py b/lib/ansible/utils/unicode.py
new file mode 100644
index 00000000000..b2fcf65161b
--- /dev/null
+++ b/lib/ansible/utils/unicode.py
@@ -0,0 +1,248 @@
+# (c) 2012-2014, Toshio Kuratomi
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# to_bytes and to_unicode were written by Toshio Kuratomi for the +# python-kitchen library https://pypi.python.org/pypi/kitchen +# They are licensed in kitchen under the terms of the GPLv2+ +# They were copied and modified for use in ansible by Toshio in Jan 2015 +# (simply removing the deprecated features) + +#: Aliases for the utf-8 codec +_UTF8_ALIASES = frozenset(('utf-8', 'UTF-8', 'utf8', 'UTF8', 'utf_8', 'UTF_8', + 'utf', 'UTF', 'u8', 'U8')) +#: Aliases for the latin-1 codec +_LATIN1_ALIASES = frozenset(('latin-1', 'LATIN-1', 'latin1', 'LATIN1', + 'latin', 'LATIN', 'l1', 'L1', 'cp819', 'CP819', '8859', 'iso8859-1', + 'ISO8859-1', 'iso-8859-1', 'ISO-8859-1')) + +# EXCEPTION_CONVERTERS is defined below due to using to_unicode + +def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None): + '''Convert an object into a :class:`unicode` string + + :arg obj: Object to convert to a :class:`unicode` string. This should + normally be a byte :class:`str` + :kwarg encoding: What encoding to try converting the byte :class:`str` as. + Defaults to :term:`utf-8` + :kwarg errors: If errors are found while decoding, perform this action. + Defaults to ``replace`` which replaces the invalid bytes with + a character that means the bytes were unable to be decoded. Other + values are the same as the error handling schemes in the `codec base + classes + `_. + For instance ``strict`` which raises an exception and ``ignore`` which + simply omits the non-decodable characters. + :kwarg nonstring: How to treat nonstring values. Possible values are: + + :simplerepr: Attempt to call the object's "simple representation" + method and return that value. Python-2.3+ has two methods that + try to return a simple representation: :meth:`object.__unicode__` + and :meth:`object.__str__`. We first try to get a usable value + from :meth:`object.__unicode__`. If that fails we try the same + with :meth:`object.__str__`. + :empty: Return an empty :class:`unicode` string + :strict: Raise a :exc:`TypeError` + :passthru: Return the object unchanged + :repr: Attempt to return a :class:`unicode` string of the repr of the + object + + Default is ``simplerepr`` + + :raises TypeError: if :attr:`nonstring` is ``strict`` and + a non-:class:`basestring` object is passed in or if :attr:`nonstring` + is set to an unknown value + :raises UnicodeDecodeError: if :attr:`errors` is ``strict`` and + :attr:`obj` is not decodable using the given encoding + :returns: :class:`unicode` string or the original object depending on the + value of :attr:`nonstring`. + + Usually this should be used on a byte :class:`str` but it can take both + byte :class:`str` and :class:`unicode` strings intelligently. Nonstring + objects are handled in different ways depending on the setting of the + :attr:`nonstring` parameter. + + The default values of this function are set so as to always return + a :class:`unicode` string and never raise an error when converting from + a byte :class:`str` to a :class:`unicode` string. However, when you do + not pass validly encoded text (or a nonstring object), you may end up with + output that you don't expect. Be sure you understand the requirements of + your data, not just ignore errors by passing it through this function. 
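+
+    A minimal illustration of the common paths (assumed Python 2 semantics;
+    the values are examples only)::
+
+        to_unicode('caf\xc3\xa9')           # utf-8 bytes -> u'caf\xe9'
+        to_unicode(u'already unicode')      # returned unchanged
+        to_unicode(5, nonstring='empty')    # non-string -> u''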
+ ''' + # Could use isbasestring/isunicode here but we want this code to be as + # fast as possible + if isinstance(obj, basestring): + if isinstance(obj, unicode): + return obj + if encoding in _UTF8_ALIASES: + return unicode(obj, 'utf-8', errors) + if encoding in _LATIN1_ALIASES: + return unicode(obj, 'latin-1', errors) + return obj.decode(encoding, errors) + + if not nonstring: + nonstring = 'simplerepr' + if nonstring == 'empty': + return u'' + elif nonstring == 'passthru': + return obj + elif nonstring == 'simplerepr': + try: + simple = obj.__unicode__() + except (AttributeError, UnicodeError): + simple = None + if not simple: + try: + simple = str(obj) + except UnicodeError: + try: + simple = obj.__str__() + except (UnicodeError, AttributeError): + simple = u'' + if isbytestring(simple): + return unicode(simple, encoding, errors) + return simple + elif nonstring in ('repr', 'strict'): + obj_repr = repr(obj) + if isbytestring(obj_repr): + obj_repr = unicode(obj_repr, encoding, errors) + if nonstring == 'repr': + return obj_repr + raise TypeError('to_unicode was given "%(obj)s" which is neither' + ' a byte string (str) or a unicode string' % + {'obj': obj_repr.encode(encoding, 'replace')}) + + raise TypeError('nonstring value, %(param)s, is not set to a valid' + ' action' % {'param': nonstring}) + +def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None): + '''Convert an object into a byte :class:`str` + + :arg obj: Object to convert to a byte :class:`str`. This should normally + be a :class:`unicode` string. + :kwarg encoding: Encoding to use to convert the :class:`unicode` string + into a byte :class:`str`. Defaults to :term:`utf-8`. + :kwarg errors: If errors are found while encoding, perform this action. + Defaults to ``replace`` which replaces the invalid bytes with + a character that means the bytes were unable to be encoded. Other + values are the same as the error handling schemes in the `codec base + classes + `_. + For instance ``strict`` which raises an exception and ``ignore`` which + simply omits the non-encodable characters. + :kwarg nonstring: How to treat nonstring values. Possible values are: + + :simplerepr: Attempt to call the object's "simple representation" + method and return that value. Python-2.3+ has two methods that + try to return a simple representation: :meth:`object.__unicode__` + and :meth:`object.__str__`. We first try to get a usable value + from :meth:`object.__str__`. If that fails we try the same + with :meth:`object.__unicode__`. + :empty: Return an empty byte :class:`str` + :strict: Raise a :exc:`TypeError` + :passthru: Return the object unchanged + :repr: Attempt to return a byte :class:`str` of the :func:`repr` of the + object + + Default is ``simplerepr``. + + :raises TypeError: if :attr:`nonstring` is ``strict`` and + a non-:class:`basestring` object is passed in or if :attr:`nonstring` + is set to an unknown value. + :raises UnicodeEncodeError: if :attr:`errors` is ``strict`` and all of the + bytes of :attr:`obj` are unable to be encoded using :attr:`encoding`. + :returns: byte :class:`str` or the original object depending on the value + of :attr:`nonstring`. + + .. warning:: + + If you pass a byte :class:`str` into this function the byte + :class:`str` is returned unmodified. It is **not** re-encoded with + the specified :attr:`encoding`. The easiest way to achieve that is:: + + to_bytes(to_unicode(text), encoding='utf-8') + + The initial :func:`to_unicode` call will ensure text is + a :class:`unicode` string. 
Then, :func:`to_bytes` will turn that into + a byte :class:`str` with the specified encoding. + + Usually, this should be used on a :class:`unicode` string but it can take + either a byte :class:`str` or a :class:`unicode` string intelligently. + Nonstring objects are handled in different ways depending on the setting + of the :attr:`nonstring` parameter. + + The default values of this function are set so as to always return a byte + :class:`str` and never raise an error when converting from unicode to + bytes. However, when you do not pass an encoding that can validly encode + the object (or a non-string object), you may end up with output that you + don't expect. Be sure you understand the requirements of your data, not + just ignore errors by passing it through this function. + ''' + # Could use isbasestring, isbytestring here but we want this to be as fast + # as possible + if isinstance(obj, basestring): + if isinstance(obj, str): + return obj + return obj.encode(encoding, errors) + if not nonstring: + nonstring = 'simplerepr' + + if nonstring == 'empty': + return '' + elif nonstring == 'passthru': + return obj + elif nonstring == 'simplerepr': + try: + simple = str(obj) + except UnicodeError: + try: + simple = obj.__str__() + except (AttributeError, UnicodeError): + simple = None + if not simple: + try: + simple = obj.__unicode__() + except (AttributeError, UnicodeError): + simple = '' + if isunicodestring(simple): + simple = simple.encode(encoding, 'replace') + return simple + elif nonstring in ('repr', 'strict'): + try: + obj_repr = obj.__repr__() + except (AttributeError, UnicodeError): + obj_repr = '' + if isunicodestring(obj_repr): + obj_repr = obj_repr.encode(encoding, errors) + else: + obj_repr = str(obj_repr) + if nonstring == 'repr': + return obj_repr + raise TypeError('to_bytes was given "%(obj)s" which is neither' + ' a unicode string or a byte string (str)' % {'obj': obj_repr}) + + raise TypeError('nonstring value, %(param)s, is not set to a valid' + ' action' % {'param': nonstring}) + + +# force the return value of a function to be unicode. Use with partial to +# ensure that a filter will return unicode values. +def unicode_wrap(func, *args, **kwargs): + return to_unicode(func(*args, **kwargs), nonstring='passthru') diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 3b83d2989e9..842688a2c18 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -26,7 +26,18 @@ from io import BytesIO from subprocess import call from ansible import errors from hashlib import sha256 -from hashlib import md5 + +# Note: Only used for loading obsolete VaultAES files. All files are written +# using the newer VaultAES256 which does not require md5 +try: + from hashlib import md5 +except ImportError: + try: + from md5 import md5 + except ImportError: + # MD5 unavailable. 
Possibly FIPS mode + md5 = None + from binascii import hexlify from binascii import unhexlify from ansible import constants as C @@ -181,6 +192,38 @@ class VaultEditor(object): self.password = password self.filename = filename + def _edit_file_helper(self, existing_data=None, cipher=None): + # make sure the umask is set to a sane value + old_umask = os.umask(0o077) + + # Create a tempfile + _, tmp_path = tempfile.mkstemp() + + if existing_data: + self.write_data(existing_data, tmp_path) + + # drop the user into an editor on the tmp file + try: + call(self._editor_shell_command(tmp_path)) + except OSError, e: + raise Exception("Failed to open editor (%s): %s" % (self._editor_shell_command(tmp_path)[0],str(e))) + tmpdata = self.read_data(tmp_path) + + # create new vault + this_vault = VaultLib(self.password) + if cipher: + this_vault.cipher_name = cipher + + # encrypt new data and write out to tmp + enc_data = this_vault.encrypt(tmpdata) + self.write_data(enc_data, tmp_path) + + # shuffle tmp file into place + self.shuffle_files(tmp_path, self.filename) + + # and restore umask + os.umask(old_umask) + def create_file(self): """ create a new encrypted file """ @@ -190,15 +233,8 @@ class VaultEditor(object): if os.path.isfile(self.filename): raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename) - # drop the user into vim on file - old_umask = os.umask(0077) - call(self._editor_shell_command(self.filename)) - tmpdata = self.read_data(self.filename) - this_vault = VaultLib(self.password) - this_vault.cipher_name = self.cipher_name - enc_data = this_vault.encrypt(tmpdata) - self.write_data(enc_data, self.filename) - os.umask(old_umask) + # Let the user specify contents and save file + self._edit_file_helper(cipher=self.cipher_name) def decrypt_file(self): @@ -207,7 +243,7 @@ class VaultEditor(object): if not os.path.isfile(self.filename): raise errors.AnsibleError("%s does not exist" % self.filename) - + tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) if this_vault.is_encrypted(tmpdata): @@ -224,35 +260,17 @@ class VaultEditor(object): if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: raise errors.AnsibleError(CRYPTO_UPGRADE) - # make sure the umask is set to a sane value - old_mask = os.umask(0077) - # decrypt to tmpfile tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) dec_data = this_vault.decrypt(tmpdata) - _, tmp_path = tempfile.mkstemp() - self.write_data(dec_data, tmp_path) - # drop the user into vim on the tmp file - call(self._editor_shell_command(tmp_path)) - new_data = self.read_data(tmp_path) + # let the user edit the data and save + self._edit_file_helper(existing_data=dec_data) + ###we want the cipher to default to AES256 (get rid of files + # encrypted with the AES cipher) + #self._edit_file_helper(existing_data=dec_data, cipher=this_vault.cipher_name) - # create new vault - new_vault = VaultLib(self.password) - - # we want the cipher to default to AES256 - #new_vault.cipher_name = this_vault.cipher_name - - # encrypt new data a write out to tmp - enc_data = new_vault.encrypt(new_data) - self.write_data(enc_data, tmp_path) - - # shuffle tmp file into place - self.shuffle_files(tmp_path, self.filename) - - # and restore the old umask - os.umask(old_mask) def view_file(self): @@ -263,8 +281,10 @@ class VaultEditor(object): tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) dec_data = this_vault.decrypt(tmpdata) + old_umask = os.umask(0o077) _, tmp_path = 
tempfile.mkstemp() self.write_data(dec_data, tmp_path) + os.umask(old_umask) # drop the user into pager on the tmp file call(self._pager_shell_command(tmp_path)) @@ -277,7 +297,7 @@ class VaultEditor(object): if not os.path.isfile(self.filename): raise errors.AnsibleError("%s does not exist" % self.filename) - + tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) this_vault.cipher_name = self.cipher_name @@ -352,6 +372,8 @@ class VaultAES(object): # http://stackoverflow.com/a/16761459 def __init__(self): + if not md5: + raise errors.AnsibleError('md5 hash is unavailable (Could be due to FIPS mode). Legacy VaultAES format is unavailable.') if not HAS_AES: raise errors.AnsibleError(CRYPTO_UPGRADE) diff --git a/library/cloud/azure b/library/cloud/azure deleted file mode 100644 index 1679fbc45d1..00000000000 --- a/library/cloud/azure +++ /dev/null @@ -1,484 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: azure -short_description: create or terminate a virtual machine in azure -description: - - Creates or terminates azure instances. When created optionally waits for it to be 'running'. This module has a dependency on python-azure >= 0.7.1 -version_added: "1.7" -options: - name: - description: - - name of the virtual machine and associated cloud service. - required: true - default: null - location: - description: - - the azure location to use (e.g. 'East US') - required: true - default: null - subscription_id: - description: - - azure subscription id. Overrides the AZURE_SUBSCRIPTION_ID environement variable. - required: false - default: null - management_cert_path: - description: - - path to an azure management certificate associated with the subscription id. Overrides the AZURE_CERT_PATH environement variable. - required: false - default: null - storage_account: - description: - - the azure storage account in which to store the data disks. - required: true - image: - description: - - system image for creating the virtual machine (e.g., b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB) - required: true - default: null - role_size: - description: - - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6) - required: false - default: Small - endpoints: - description: - - a comma-separated list of TCP ports to expose on the virtual machine (e.g., "22,80") - required: false - default: 22 - user: - description: - - the unix username for the new virtual machine. - required: false - default: null - password: - description: - - the unix password for the new virtual machine. - required: false - default: null - ssh_cert_path: - description: - - path to an X509 certificate containing the public ssh key to install in the virtual machine. See http://www.windowsazure.com/en-us/manage/linux/tutorials/intro-to-linux/ for more details. 
- - if this option is specified, password-based ssh authentication will be disabled. - required: false - default: null - virtual_network_name: - description: - - Name of virtual network. - required: false - default: null - hostname: - description: - - hostname to write /etc/hostname. Defaults to .cloudapp.net. - required: false - default: null - wait: - description: - - wait for the instance to be in state 'running' before returning - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 600 - aliases: [] - wait_timeout_redirects: - description: - - how long before wait gives up for redirects, in seconds - default: 300 - aliases: [] - state: - description: - - create or terminate instances - required: false - default: 'present' - aliases: [] - -requirements: [ "azure" ] -author: John Whitbeck -''' - -EXAMPLES = ''' -# Note: None of these examples set subscription_id or management_cert_path -# It is assumed that their matching environment variables are set. - -# Provision virtual machine example -- local_action: - module: azure - name: my-virtual-machine - role_size: Small - image: b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB - location: 'East US' - user: ubuntu - ssh_cert_path: /path/to/azure_x509_cert.pem - storage_account: my-storage-account - wait: yes - -# Terminate virtual machine example -- local_action: - module: azure - name: my-virtual-machine - state: absent -''' - -import base64 -import datetime -import os -import sys -import time -from urlparse import urlparse - -AZURE_LOCATIONS = ['South Central US', - 'Central US', - 'East US 2', - 'East US', - 'West US', - 'North Central US', - 'North Europe', - 'West Europe', - 'East Asia', - 'Southeast Asia', - 'Japan West', - 'Japan East', - 'Brazil South'] - -AZURE_ROLE_SIZES = ['ExtraSmall', - 'Small', - 'Medium', - 'Large', - 'ExtraLarge', - 'A5', - 'A6', - 'A7', - 'A8', - 'A9', - 'Basic_A0', - 'Basic_A1', - 'Basic_A2', - 'Basic_A3', - 'Basic_A4'] - -try: - import azure as windows_azure - - from azure import WindowsAzureError, WindowsAzureMissingResourceError - from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys, - PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints, - ConfigurationSetInputEndpoint) -except ImportError: - print "failed=True msg='azure required for this module'" - sys.exit(1) - -from distutils.version import LooseVersion -from types import MethodType -import json - - -def _wait_for_completion(azure, promise, wait_timeout, msg): - if not promise: return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - operation_result = azure.get_operation_status(promise.request_id) - time.sleep(5) - if operation_result.status == "Succeeded": - return - - raise WindowsAzureError('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.') - - -def get_ssh_certificate_tokens(module, ssh_cert_path): - """ - Returns the sha1 fingerprint and a base64-encoded PKCS12 version of the certificate. 
- """ - # This returns a string such as SHA1 Fingerprint=88:60:0B:13:A9:14:47:DA:4E:19:10:7D:34:92:2B:DF:A1:7D:CA:FF - rc, stdout, stderr = module.run_command(['openssl', 'x509', '-in', ssh_cert_path, '-fingerprint', '-noout']) - if rc != 0: - module.fail_json(msg="failed to generate the key fingerprint, error was: %s" % stderr) - fingerprint = stdout.strip()[17:].replace(':', '') - - rc, stdout, stderr = module.run_command(['openssl', 'pkcs12', '-export', '-in', ssh_cert_path, '-nokeys', '-password', 'pass:']) - if rc != 0: - module.fail_json(msg="failed to generate the pkcs12 signature from the certificate, error was: %s" % stderr) - pkcs12_base64 = base64.b64encode(stdout.strip()) - - return (fingerprint, pkcs12_base64) - - -def create_virtual_machine(module, azure): - """ - Create new virtual machine - - module : AnsibleModule object - azure: authenticated azure ServiceManagementService object - - Returns: - True if a new virtual machine was created, false otherwise - """ - name = module.params.get('name') - hostname = module.params.get('hostname') or name + ".cloudapp.net" - endpoints = module.params.get('endpoints').split(',') - ssh_cert_path = module.params.get('ssh_cert_path') - user = module.params.get('user') - password = module.params.get('password') - location = module.params.get('location') - role_size = module.params.get('role_size') - storage_account = module.params.get('storage_account') - image = module.params.get('image') - virtual_network_name = module.params.get('virtual_network_name') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - # Check if a deployment with the same name already exists - cloud_service_name_available = azure.check_hosted_service_name_availability(name) - if not cloud_service_name_available.result: - changed = False - else: - changed = True - # Create cloud service if necessary - try: - result = azure.create_hosted_service(service_name=name, label=name, location=location) - _wait_for_completion(azure, result, wait_timeout, "create_hosted_service") - except WindowsAzureError as e: - module.fail_json(msg="failed to create the new service name, it already exists: %s" % str(e)) - - # Create linux configuration - disable_ssh_password_authentication = not password - linux_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication) - - # Add ssh certificates if specified - if ssh_cert_path: - fingerprint, pkcs12_base64 = get_ssh_certificate_tokens(module, ssh_cert_path) - # Add certificate to cloud service - result = azure.add_service_certificate(name, pkcs12_base64, 'pfx', '') - _wait_for_completion(azure, result, wait_timeout, "add_service_certificate") - - # Create ssh config - ssh_config = SSH() - ssh_config.public_keys = PublicKeys() - authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user - ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint)) - # Append ssh config to linux machine config - linux_config.ssh = ssh_config - - # Create network configuration - network_config = ConfigurationSetInputEndpoints() - network_config.configuration_set_type = 'NetworkConfiguration' - network_config.subnet_names = [] - for port in endpoints: - network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port, - protocol='TCP', - port=port, - local_port=port)) - - # First determine where to store disk - today = datetime.date.today().strftime('%Y-%m-%d') - disk_prefix = u'%s-%s' % (name, name) - media_link = 
u'http://%s.blob.core.windows.net/vhds/%s-%s.vhd' % (storage_account, disk_prefix, today) - # Create system hard disk - os_hd = OSVirtualHardDisk(image, media_link) - - # Spin up virtual machine - try: - result = azure.create_virtual_machine_deployment(service_name=name, - deployment_name=name, - deployment_slot='production', - label=name, - role_name=name, - system_config=linux_config, - network_config=network_config, - os_virtual_hard_disk=os_hd, - role_size=role_size, - role_type='PersistentVMRole', - virtual_network_name=virtual_network_name) - _wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment") - except WindowsAzureError as e: - module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e)) - - - try: - deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name) - return (changed, urlparse(deployment.url).hostname, deployment) - except WindowsAzureError as e: - module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e))) - - -def terminate_virtual_machine(module, azure): - """ - Terminates a virtual machine - - module : AnsibleModule object - azure: authenticated azure ServiceManagementService object - - Not yet supported: handle deletion of attached data disks. - - Returns: - True if a new virtual machine was deleted, false otherwise - """ - - # Whether to wait for termination to complete before returning - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - name = module.params.get('name') - delete_empty_services = module.params.get('delete_empty_services') - - changed = False - - deployment = None - public_dns_name = None - disk_names = [] - try: - deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name) - except WindowsAzureMissingResourceError as e: - pass # no such deployment or service - except WindowsAzureError as e: - module.fail_json(msg="failed to find the deployment, error was: %s" % str(e)) - - # Delete deployment - if deployment: - changed = True - try: - # gather disk info - results = [] - for role in deployment.role_list: - role_props = azure.get_role(name, deployment.name, role.role_name) - if role_props.os_virtual_hard_disk.disk_name not in disk_names: - disk_names.append(role_props.os_virtual_hard_disk.disk_name) - - result = azure.delete_deployment(name, deployment.name) - _wait_for_completion(azure, result, wait_timeout, "delete_deployment") - - for disk_name in disk_names: - azure.delete_disk(disk_name, True) - - # Now that the vm is deleted, remove the cloud service - result = azure.delete_hosted_service(service_name=name) - _wait_for_completion(azure, result, wait_timeout, "delete_hosted_service") - except WindowsAzureError as e: - module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e))) - public_dns_name = urlparse(deployment.url).hostname - - return changed, public_dns_name, deployment - - -def get_azure_creds(module): - # Check modul args for credentials, then check environment vars - subscription_id = module.params.get('subscription_id') - if not subscription_id: - subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID', None) - if not subscription_id: - module.fail_json(msg="No subscription_id provided. 
Please set 'AZURE_SUBSCRIPTION_ID' or use the 'subscription_id' parameter") - - management_cert_path = module.params.get('management_cert_path') - if not management_cert_path: - management_cert_path = os.environ.get('AZURE_CERT_PATH', None) - if not management_cert_path: - module.fail_json(msg="No management_cert_path provided. Please set 'AZURE_CERT_PATH' or use the 'management_cert_path' parameter") - - return subscription_id, management_cert_path - - -def main(): - module = AnsibleModule( - argument_spec=dict( - ssh_cert_path=dict(), - name=dict(), - hostname=dict(), - location=dict(choices=AZURE_LOCATIONS), - role_size=dict(choices=AZURE_ROLE_SIZES), - subscription_id=dict(no_log=True), - storage_account=dict(), - management_cert_path=dict(), - endpoints=dict(default='22'), - user=dict(), - password=dict(), - image=dict(), - virtual_network_name=dict(default=None), - state=dict(default='present'), - wait=dict(type='bool', default=False), - wait_timeout=dict(default=600), - wait_timeout_redirects=dict(default=300) - ) - ) - # create azure ServiceManagementService object - subscription_id, management_cert_path = get_azure_creds(module) - - wait_timeout_redirects = int(module.params.get('wait_timeout_redirects')) - if LooseVersion(windows_azure.__version__) <= "0.8.0": - # wrapper for handling redirects which the sdk <= 0.8.0 is not following - azure = Wrapper(ServiceManagementService(subscription_id, management_cert_path), wait_timeout_redirects) - else: - azure = ServiceManagementService(subscription_id, management_cert_path) - - cloud_service_raw = None - if module.params.get('state') == 'absent': - (changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure) - - elif module.params.get('state') == 'present': - # Changed is always set to true when provisioning new instances - if not module.params.get('name'): - module.fail_json(msg='name parameter is required for new instance') - if not module.params.get('image'): - module.fail_json(msg='image parameter is required for new instance') - if not module.params.get('user'): - module.fail_json(msg='user parameter is required for new instance') - if not module.params.get('location'): - module.fail_json(msg='location parameter is required for new instance') - if not module.params.get('storage_account'): - module.fail_json(msg='storage_account parameter is required for new instance') - (changed, public_dns_name, deployment) = create_virtual_machine(module, azure) - - module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__))) - - -class Wrapper(object): - def __init__(self, obj, wait_timeout): - self.other = obj - self.wait_timeout = wait_timeout - - def __getattr__(self, name): - if hasattr(self.other, name): - func = getattr(self.other, name) - return lambda *args, **kwargs: self._wrap(func, args, kwargs) - raise AttributeError(name) - - def _wrap(self, func, args, kwargs): - if type(func) == MethodType: - result = self._handle_temporary_redirects(lambda: func(*args, **kwargs)) - else: - result = self._handle_temporary_redirects(lambda: func(self.other, *args, **kwargs)) - return result - - def _handle_temporary_redirects(self, f): - wait_timeout = time.time() + self.wait_timeout - while wait_timeout > time.time(): - try: - return f() - except WindowsAzureError as e: - if not str(e).lower().find("temporary redirect") == -1: - time.sleep(5) - pass - else: - raise e - - -# import module snippets -from ansible.module_utils.basic import * - 
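- # Editor's note: a minimal, illustrative sketch of driving the Wrapper class above (the names and timeout value are hypothetical, not part of the module): - # svc = ServiceManagementService(subscription_id, management_cert_path) - # azure = Wrapper(svc, wait_timeout=300) - # azure.list_hosted_services() # attribute access is proxied to the wrapped SDK object, and each call is retried every 5 seconds, for up to wait_timeout seconds, while the SDK raises "temporary redirect" errors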
-main() diff --git a/library/cloud/cloudformation b/library/cloud/cloudformation deleted file mode 100644 index 6a7838a51b2..00000000000 --- a/library/cloud/cloudformation +++ /dev/null @@ -1,313 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. -DOCUMENTATION = ''' ---- -module: cloudformation -short_description: create an AWS CloudFormation stack -description: - - Launches an AWS CloudFormation stack and waits for it to complete. -version_added: "1.1" -options: - stack_name: - description: - - name of the cloudformation stack - required: true - default: null - aliases: [] - disable_rollback: - description: - - If a stack fails to form, rollback will remove the stack - required: false - default: "false" - choices: [ "true", "false" ] - aliases: [] - template_parameters: - description: - - a list of hashes of all the template variables for the stack - required: false - default: {} - aliases: [] - state: - description: - - If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated. - If state is absent, stack will be removed. - required: true - default: null - aliases: [] - template: - description: - - the path of the cloudformation template - required: true - default: null - aliases: [] - tags: - description: - - Dictionary of tags to associate with stack and its resources during stack creation. Cannot be updated later. - Requires at least Boto version 2.6.0. - required: false - default: null - aliases: [] - version_added: "1.4" - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_secret_key', 'secret_key' ] - version_added: "1.5" - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_access_key', 'access_key' ] - version_added: "1.5" - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - version_added: "1.5" - -requirements: [ "boto" ] -author: James S. 
Martin -''' - -EXAMPLES = ''' -# Basic task example -tasks: -- name: launch ansible cloudformation example - action: cloudformation > - stack_name="ansible-cloudformation" state=present - region=us-east-1 disable_rollback=true - template=files/cloudformation-example.json - args: - template_parameters: - KeyName: jmartin - DiskType: ephemeral - InstanceType: m1.small - ClusterSize: 3 - tags: - Stack: ansible-cloudformation -''' - -import json -import time - -try: - import boto - import boto.cloudformation.connection -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -class Region: - def __init__(self, region): - '''connects boto to the region specified in the cloudformation template''' - self.name = region - self.endpoint = 'cloudformation.%s.amazonaws.com' % region - - -def boto_exception(err): - '''generic error message handler''' - if hasattr(err, 'error_message'): - error = err.error_message - elif hasattr(err, 'message'): - error = err.message - else: - error = '%s: %s' % (Exception, err) - - return error - - -def boto_version_required(version_tuple): - parts = boto.Version.split('.') - boto_version = [] - try: - for part in parts: - boto_version.append(int(part)) - except: - boto_version.append(-1) - return tuple(boto_version) >= tuple(version_tuple) - - -def stack_operation(cfn, stack_name, operation): - '''gets the status of a stack while it is created/updated/deleted''' - existed = [] - result = {} - operation_complete = False - while operation_complete == False: - try: - stack = cfn.describe_stacks(stack_name)[0] - existed.append('yes') - except: - if 'yes' in existed: - result = dict(changed=True, - output='Stack Deleted', - events=map(str, list(stack.describe_events()))) - else: - result = dict(changed= True, output='Stack Not Found') - break - if '%s_COMPLETE' % operation == stack.stack_status: - result = dict(changed=True, - events = map(str, list(stack.describe_events())), - output = 'Stack %s complete' % operation) - break - if 'ROLLBACK_COMPLETE' == stack.stack_status or '%s_ROLLBACK_COMPLETE' % operation == stack.stack_status: - result = dict(changed=True, failed=True, - events = map(str, list(stack.describe_events())), - output = 'Problem with %s. 
Rollback complete' % operation) - break - elif '%s_FAILED' % operation == stack.stack_status: - result = dict(changed=True, failed=True, - events = map(str, list(stack.describe_events())), - output = 'Stack %s failed' % operation) - break - else: - time.sleep(5) - return result - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - stack_name=dict(required=True), - template_parameters=dict(required=False, type='dict', default={}), - state=dict(default='present', choices=['present', 'absent']), - template=dict(default=None, required=True), - disable_rollback=dict(default=False, type='bool'), - tags=dict(default=None) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - state = module.params['state'] - stack_name = module.params['stack_name'] - template_body = open(module.params['template'], 'r').read() - disable_rollback = module.params['disable_rollback'] - template_parameters = module.params['template_parameters'] - tags = module.params['tags'] - - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - kwargs = dict() - if tags is not None: - if not boto_version_required((2,6,0)): - module.fail_json(msg='Module parameter "tags" requires at least Boto version 2.6.0') - kwargs['tags'] = tags - - - # convert the template parameters ansible passes into a tuple for boto - template_parameters_tup = [(k, v) for k, v in template_parameters.items()] - stack_outputs = {} - - try: - cf_region = Region(region) - cfn = boto.cloudformation.connection.CloudFormationConnection( - aws_access_key_id=aws_access_key, - aws_secret_access_key=aws_secret_key, - region=cf_region, - ) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - update = False - result = {} - operation = None - - # if state is present we are going to ensure that the stack is either - # created or updated - if state == 'present': - try: - cfn.create_stack(stack_name, parameters=template_parameters_tup, - template_body=template_body, - disable_rollback=disable_rollback, - capabilities=['CAPABILITY_IAM'], - **kwargs) - operation = 'CREATE' - except Exception, err: - error_msg = boto_exception(err) - if 'AlreadyExistsException' in error_msg or 'already exists' in error_msg: - update = True - else: - module.fail_json(msg=error_msg) - if not update: - result = stack_operation(cfn, stack_name, operation) - - # if the state is present and the stack already exists, we try to update it - # AWS will tell us if the stack template and parameters are the same and - # don't need to be updated. - if update: - try: - cfn.update_stack(stack_name, parameters=template_parameters_tup, - template_body=template_body, - disable_rollback=disable_rollback, - capabilities=['CAPABILITY_IAM']) - operation = 'UPDATE' - except Exception, err: - error_msg = boto_exception(err) - if 'No updates are to be performed.' in error_msg: - result = dict(changed=False, output='Stack is already up-to-date.') - else: - module.fail_json(msg=error_msg) - - if operation == 'UPDATE': - result = stack_operation(cfn, stack_name, operation) - - # check the status of the stack while we are creating/updating it. - # and get the outputs of the stack - - if state == 'present' or update: - stack = cfn.describe_stacks(stack_name)[0] - for output in stack.outputs: - stack_outputs[output.key] = output.value - result['stack_outputs'] = stack_outputs - - # absent state is different because of the way delete_stack works. 
- # problem is it doesn't give an error if stack isn't found - # so must describe the stack first - - if state == 'absent': - try: - cfn.describe_stacks(stack_name) - operation = 'DELETE' - except Exception, err: - error_msg = boto_exception(err) - if 'Stack:%s does not exist' % stack_name in error_msg: - result = dict(changed=False, output='Stack not found.') - else: - module.fail_json(msg=error_msg) - if operation == 'DELETE': - cfn.delete_stack(stack_name) - result = stack_operation(cfn, stack_name, operation) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/digital_ocean b/library/cloud/digital_ocean deleted file mode 100644 index efebf5f1bcf..00000000000 --- a/library/cloud/digital_ocean +++ /dev/null @@ -1,434 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. -DOCUMENTATION = ''' ---- -module: digital_ocean -short_description: Create/delete a droplet/SSH_key in DigitalOcean -description: - - Create/delete a droplet in DigitalOcean and optionally wait for it to be 'running', or deploy an SSH key. -version_added: "1.3" -options: - command: - description: - - Which target you want to operate on. - default: droplet - choices: ['droplet', 'ssh'] - state: - description: - - Indicate desired state of the target. - default: present - choices: ['present', 'active', 'absent', 'deleted'] - client_id: - description: - - DigitalOcean manager id. - api_key: - description: - - DigitalOcean api key. - id: - description: - - Numeric, the droplet id you want to operate on. - name: - description: - - String, this is the name of the droplet - must be formatted by hostname rules, or the name of an SSH key. - unique_name: - description: - - Bool, require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence. - version_added: "1.4" - default: "no" - choices: [ "yes", "no" ] - size_id: - description: - - Numeric, this is the id of the size you would like the droplet created with. - image_id: - description: - - Numeric, this is the id of the image you would like the droplet created with. - region_id: - description: - - "Numeric, this is the id of the region you would like your server to be created in." - ssh_key_ids: - description: - - Optional, comma separated list of ssh_key_ids that you would like to be added to the server. - virtio: - description: - - "Bool, turn on virtio driver in droplet for improved network and storage I/O." - version_added: "1.4" - default: "yes" - choices: [ "yes", "no" ] - private_networking: - description: - - "Bool, add an additional, private network interface to droplet for inter-droplet communication." 
- version_added: "1.4" - default: "no" - choices: [ "yes", "no" ] - backups_enabled: - description: - - Optional, Boolean, enables backups for your droplet. - version_added: "1.6" - default: "no" - choices: [ "yes", "no" ] - wait: - description: - - Wait for the droplet to be in state 'running' before returning. If wait is "no" an ip_address may not be returned. - default: "yes" - choices: [ "yes", "no" ] - wait_timeout: - description: - - How long before wait gives up, in seconds. - default: 300 - ssh_pub_key: - description: - - The public SSH key you want to add to your account. - -notes: - - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY. -requirements: [ dopy ] -''' - - -EXAMPLES = ''' -# Ensure a SSH key is present -# If a key matches this name, will return the ssh key id and changed = False -# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = False - -- digital_ocean: > - state=present - command=ssh - name=my_ssh_key - ssh_pub_key='ssh-rsa AAAA...' - client_id=XXX - api_key=XXX - -# Create a new Droplet -# Will return the droplet details including the droplet id (used for idempotence) - -- digital_ocean: > - state=present - command=droplet - name=mydroplet - client_id=XXX - api_key=XXX - size_id=1 - region_id=2 - image_id=3 - wait_timeout=500 - register: my_droplet -- debug: msg="ID is {{ my_droplet.droplet.id }}" -- debug: msg="IP is {{ my_droplet.droplet.ip_address }}" - -# Ensure a droplet is present -# If droplet id already exist, will return the droplet details and changed = False -# If no droplet matches the id, a new droplet will be created and the droplet details (including the new id) are returned, changed = True. - -- digital_ocean: > - state=present - command=droplet - id=123 - name=mydroplet - client_id=XXX - api_key=XXX - size_id=1 - region_id=2 - image_id=3 - wait_timeout=500 - -# Create a droplet with ssh key -# The ssh key id can be passed as argument at the creation of a droplet (see ssh_key_ids). -# Several keys can be added to ssh_key_ids as id1,id2,id3 -# The keys are used to connect as root to the droplet. - -- digital_ocean: > - state=present - ssh_key_ids=id1,id2 - name=mydroplet - client_id=XXX - api_key=XXX - size_id=1 - region_id=2 - image_id=3 -''' - -import sys -import os -import time - -try: - import dopy - from dopy.manager import DoError, DoManager -except ImportError, e: - print "failed=True msg='dopy >= 0.2.3 required for this module'" - sys.exit(1) - -if dopy.__version__ < '0.2.3': - print "failed=True msg='dopy >= 0.2.3 required for this module'" - sys.exit(1) - -class TimeoutError(DoError): - def __init__(self, msg, id): - super(TimeoutError, self).__init__(msg) - self.id = id - -class JsonfyMixIn(object): - def to_json(self): - return self.__dict__ - -class Droplet(JsonfyMixIn): - manager = None - - def __init__(self, droplet_json): - self.status = 'new' - self.__dict__.update(droplet_json) - - def is_powered_on(self): - return self.status == 'active' - - def update_attr(self, attrs=None): - if attrs: - for k, v in attrs.iteritems(): - setattr(self, k, v) - else: - json = self.manager.show_droplet(self.id) - if json['ip_address']: - self.update_attr(json) - - def power_on(self): - assert self.status == 'off', 'Can only power on a closed one.' 
- json = self.manager.power_on_droplet(self.id) - self.update_attr(json) - - def ensure_powered_on(self, wait=True, wait_timeout=300): - if self.is_powered_on(): - return - if self.status == 'off': # powered off - self.power_on() - - if wait: - end_time = time.time() + wait_timeout - while time.time() < end_time: - time.sleep(min(20, end_time - time.time())) - self.update_attr() - if self.is_powered_on(): - if not self.ip_address: - raise TimeoutError('No ip is found.', self.id) - return - raise TimeoutError('Wait for droplet running timeout', self.id) - - def destroy(self): - return self.manager.destroy_droplet(self.id, scrub_data=True) - - @classmethod - def setup(cls, client_id, api_key): - cls.manager = DoManager(client_id, api_key) - - @classmethod - def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False): - json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking, backups_enabled) - droplet = cls(json) - return droplet - - @classmethod - def find(cls, id=None, name=None): - if not id and not name: - return False - - droplets = cls.list_all() - - # Check first by id. digital ocean requires that it be unique - for droplet in droplets: - if droplet.id == id: - return droplet - - # Failing that, check by hostname. - for droplet in droplets: - if droplet.name == name: - return droplet - - return False - - @classmethod - def list_all(cls): - json = cls.manager.all_active_droplets() - return map(cls, json) - -class SSH(JsonfyMixIn): - manager = None - - def __init__(self, ssh_key_json): - self.__dict__.update(ssh_key_json) - update_attr = __init__ - - def destroy(self): - self.manager.destroy_ssh_key(self.id) - return True - - @classmethod - def setup(cls, client_id, api_key): - cls.manager = DoManager(client_id, api_key) - - @classmethod - def find(cls, name): - if not name: - return False - keys = cls.list_all() - for key in keys: - if key.name == name: - return key - return False - - @classmethod - def list_all(cls): - json = cls.manager.all_ssh_keys() - return map(cls, json) - - @classmethod - def add(cls, name, key_pub): - json = cls.manager.new_ssh_key(name, key_pub) - return cls(json) - -def core(module): - def getkeyordie(k): - v = module.params[k] - if v is None: - module.fail_json(msg='Unable to load %s' % k) - return v - - try: - # params['client_id'] will be None even if client_id is not passed in - client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID'] - api_key = module.params['api_key'] or os.environ['DO_API_KEY'] - except KeyError, e: - module.fail_json(msg='Unable to load %s' % e.message) - - changed = True - command = module.params['command'] - state = module.params['state'] - - if command == 'droplet': - Droplet.setup(client_id, api_key) - if state in ('active', 'present'): - - # First, try to find a droplet by id. - droplet = Droplet.find(id=module.params['id']) - - # If we couldn't find the droplet and the user is allowing unique - # hostnames, then check to see if a droplet with the specified - # hostname already exists. - if not droplet and module.params['unique_name']: - droplet = Droplet.find(name=getkeyordie('name')) - - # If both of those attempts failed, then create a new droplet. 
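- # (The two attempts above are Droplet.find by id and, when unique_name is set, by name; Droplet.add below wraps dopy's new_droplet call, as shown in the class above.)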
- if not droplet: - droplet = Droplet.add( - name=getkeyordie('name'), - size_id=getkeyordie('size_id'), - image_id=getkeyordie('image_id'), - region_id=getkeyordie('region_id'), - ssh_key_ids=module.params['ssh_key_ids'], - virtio=module.params['virtio'], - private_networking=module.params['private_networking'], - backups_enabled=module.params['backups_enabled'], - ) - - if droplet.is_powered_on(): - changed = False - - droplet.ensure_powered_on( - wait=getkeyordie('wait'), - wait_timeout=getkeyordie('wait_timeout') - ) - - module.exit_json(changed=changed, droplet=droplet.to_json()) - - elif state in ('absent', 'deleted'): - # First, try to find a droplet by id. - droplet = Droplet.find(module.params['id']) - - # If we couldn't find the droplet and the user is allowing unique - # hostnames, then check to see if a droplet with the specified - # hostname already exists. - if not droplet and module.params['unique_name']: - droplet = Droplet.find(name=getkeyordie('name')) - - if not droplet: - module.exit_json(changed=False, msg='The droplet is not found.') - - event_json = droplet.destroy() - module.exit_json(changed=True, event_id=event_json['event_id']) - - elif command == 'ssh': - SSH.setup(client_id, api_key) - name = getkeyordie('name') - if state in ('active', 'present'): - key = SSH.find(name) - if key: - module.exit_json(changed=False, ssh_key=key.to_json()) - key = SSH.add(name, getkeyordie('ssh_pub_key')) - module.exit_json(changed=True, ssh_key=key.to_json()) - - elif state in ('absent', 'deleted'): - key = SSH.find(name) - if not key: - module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name) - key.destroy() - module.exit_json(changed=True) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - command = dict(choices=['droplet', 'ssh'], default='droplet'), - state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'), - client_id = dict(aliases=['CLIENT_ID'], no_log=True), - api_key = dict(aliases=['API_KEY'], no_log=True), - name = dict(type='str'), - size_id = dict(type='int'), - image_id = dict(type='int'), - region_id = dict(type='int'), - ssh_key_ids = dict(default=''), - virtio = dict(type='bool', default='yes'), - private_networking = dict(type='bool', default='no'), - backups_enabled = dict(type='bool', default='no'), - id = dict(aliases=['droplet_id'], type='int'), - unique_name = dict(type='bool', default='no'), - wait = dict(type='bool', default=True), - wait_timeout = dict(default=300, type='int'), - ssh_pub_key = dict(type='str'), - ), - required_together = ( - ['size_id', 'image_id', 'region_id'], - ), - mutually_exclusive = ( - ['size_id', 'ssh_pub_key'], - ['image_id', 'ssh_pub_key'], - ['region_id', 'ssh_pub_key'], - ), - required_one_of = ( - ['id', 'name'], - ), - ) - - try: - core(module) - except TimeoutError, e: - module.fail_json(msg=str(e), id=e.id) - except (DoError, Exception), e: - module.fail_json(msg=str(e)) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/cloud/digital_ocean_domain b/library/cloud/digital_ocean_domain deleted file mode 100644 index d0615ad0df0..00000000000 --- a/library/cloud/digital_ocean_domain +++ /dev/null @@ -1,242 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# 
(at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. -DOCUMENTATION = ''' ---- -module: digital_ocean_domain -short_description: Create/delete a DNS record in DigitalOcean -description: - - Create/delete a DNS record in DigitalOcean. -version_added: "1.6" -options: - state: - description: - - Indicate desired state of the target. - default: present - choices: ['present', 'active', 'absent', 'deleted'] - client_id: - description: - - DigitalOcean manager id. - api_key: - description: - - DigitalOcean api key. - id: - description: - - Numeric, the droplet id you want to operate on. - name: - description: - - String, this is the name of the droplet - must be formatted by hostname rules, or the name of an SSH key, or the name of a domain. - ip: - description: - - The IP address to point a domain at. - -notes: - - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY. -''' - - -EXAMPLES = ''' -# Create a domain record - -- digital_ocean_domain: > - state=present - name=my.digitalocean.domain - ip=127.0.0.1 - -# Create a droplet and a corresponding domain record - -- digital_ocean: > - state=present - name=test_droplet - size_id=1 - region_id=2 - image_id=3 - register: test_droplet - -- digital_ocean_domain: > - state=present - name={{ test_droplet.droplet.name }}.my.domain - ip={{ test_droplet.droplet.ip_address }} -''' - -import sys -import os -import time - -try: - from dopy.manager import DoError, DoManager -except ImportError as e: - print "failed=True msg='dopy required for this module'" - sys.exit(1) - -class TimeoutError(DoError): - def __init__(self, msg, id): - super(TimeoutError, self).__init__(msg) - self.id = id - -class JsonfyMixIn(object): - def to_json(self): - return self.__dict__ - -class DomainRecord(JsonfyMixIn): - manager = None - - def __init__(self, json): - self.__dict__.update(json) - update_attr = __init__ - - def update(self, data = None, record_type = None): - json = self.manager.edit_domain_record(self.domain_id, - self.id, - record_type if record_type is not None else self.record_type, - data if data is not None else self.data) - self.__dict__.update(json) - return self - - def destroy(self): - json = self.manager.destroy_domain_record(self.domain_id, self.id) - return json - -class Domain(JsonfyMixIn): - manager = None - - def __init__(self, domain_json): - self.__dict__.update(domain_json) - - def destroy(self): - self.manager.destroy_domain(self.id) - - def records(self): - json = self.manager.all_domain_records(self.id) - return map(DomainRecord, json) - - @classmethod - def add(cls, name, ip): - json = cls.manager.new_domain(name, ip) - return cls(json) - - @classmethod - def setup(cls, client_id, api_key): - cls.manager = DoManager(client_id, api_key) - DomainRecord.manager = cls.manager - - @classmethod - def list_all(cls): - domains = cls.manager.all_domains() - return map(cls, domains) - - @classmethod - def find(cls, name=None, id=None): - if name is None and id is None: - return False - - domains = Domain.list_all() - - if id is not None: - for domain in domains: - if domain.id == id: - return domain - - if name is not None: - for domain in domains: - if domain.name == name: - return domain - - return False - -def 
core(module): - def getkeyordie(k): - v = module.params[k] - if v is None: - module.fail_json(msg='Unable to load %s' % k) - return v - - try: - # params['client_id'] will be None even if client_id is not passed in - client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID'] - api_key = module.params['api_key'] or os.environ['DO_API_KEY'] - except KeyError, e: - module.fail_json(msg='Unable to load %s' % e.message) - - changed = True - state = module.params['state'] - - Domain.setup(client_id, api_key) - if state in ('present',): - domain = Domain.find(id=module.params["id"]) - - if not domain: - domain = Domain.find(name=getkeyordie("name")) - - if not domain: - domain = Domain.add(getkeyordie("name"), - getkeyordie("ip")) - module.exit_json(changed=True, domain=domain.to_json()) - else: - records = domain.records() - at_record = None - for record in records: - if record.name == "@": - at_record = record - - if at_record is not None and not at_record.data == getkeyordie("ip"): - at_record.update(data=getkeyordie("ip"), record_type='A') - module.exit_json(changed=True, domain=Domain.find(id=at_record.domain_id).to_json()) - - module.exit_json(changed=False, domain=domain.to_json()) - - elif state in ('absent',): - domain = None - if "id" in module.params: - domain = Domain.find(id=module.params["id"]) - - if not domain and "name" in module.params: - domain = Domain.find(name=module.params["name"]) - - if not domain: - module.exit_json(changed=False, msg="Domain not found.") - - event_json = domain.destroy() - module.exit_json(changed=True, event=event_json) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'), - client_id = dict(aliases=['CLIENT_ID'], no_log=True), - api_key = dict(aliases=['API_KEY'], no_log=True), - name = dict(type='str'), - id = dict(aliases=['droplet_id'], type='int'), - ip = dict(type='str'), - ), - required_one_of = ( - ['id', 'name'], - ), - ) - - try: - core(module) - except TimeoutError as e: - module.fail_json(msg=str(e), id=e.id) - except (DoError, Exception) as e: - module.fail_json(msg=str(e)) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/cloud/digital_ocean_sshkey b/library/cloud/digital_ocean_sshkey deleted file mode 100644 index 69f32266b51..00000000000 --- a/library/cloud/digital_ocean_sshkey +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. -DOCUMENTATION = ''' ---- -module: digital_ocean_sshkey -short_description: Create/delete an SSH key in DigitalOcean -description: - - Create/delete an SSH key. -version_added: "1.6" -options: - state: - description: - - Indicate desired state of the target. - default: present - choices: ['present', 'absent'] - client_id: - description: - - DigitalOcean manager id. - api_key: - description: - - DigitalOcean api key. 
- id: - description: - Numeric, the SSH key id you want to operate on. - name: - description: - String, this is the name of an SSH key to create or destroy. - ssh_pub_key: - description: - The public SSH key you want to add to your account. - -notes: - - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY. -''' - - -EXAMPLES = ''' -# Ensure an SSH key is present -# If a key matches this name, will return the ssh key id and changed = False -# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = True - -- digital_ocean_sshkey: > - state=present - name=my_ssh_key - ssh_pub_key='ssh-rsa AAAA...' - client_id=XXX - api_key=XXX - -''' - -import sys -import os -import time - -try: - from dopy.manager import DoError, DoManager -except ImportError as e: - print "failed=True msg='dopy required for this module'" - sys.exit(1) - -class TimeoutError(DoError): - def __init__(self, msg, id): - super(TimeoutError, self).__init__(msg) - self.id = id - -class JsonfyMixIn(object): - def to_json(self): - return self.__dict__ - -class SSH(JsonfyMixIn): - manager = None - - def __init__(self, ssh_key_json): - self.__dict__.update(ssh_key_json) - update_attr = __init__ - - def destroy(self): - self.manager.destroy_ssh_key(self.id) - return True - - @classmethod - def setup(cls, client_id, api_key): - cls.manager = DoManager(client_id, api_key) - - @classmethod - def find(cls, name): - if not name: - return False - keys = cls.list_all() - for key in keys: - if key.name == name: - return key - return False - - @classmethod - def list_all(cls): - json = cls.manager.all_ssh_keys() - return map(cls, json) - - @classmethod - def add(cls, name, key_pub): - json = cls.manager.new_ssh_key(name, key_pub) - return cls(json) - -def core(module): - def getkeyordie(k): - v = module.params[k] - if v is None: - module.fail_json(msg='Unable to load %s' % k) - return v - - try: - # params['client_id'] will be None even if client_id is not passed in - client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID'] - api_key = module.params['api_key'] or os.environ['DO_API_KEY'] - except KeyError, e: - module.fail_json(msg='Unable to load %s' % e.message) - - changed = True - state = module.params['state'] - - SSH.setup(client_id, api_key) - name = getkeyordie('name') - if state in ('present',): - key = SSH.find(name) - if key: - module.exit_json(changed=False, ssh_key=key.to_json()) - key = SSH.add(name, getkeyordie('ssh_pub_key')) - module.exit_json(changed=True, ssh_key=key.to_json()) - - elif state in ('absent',): - key = SSH.find(name) - if not key: - module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' 
% name) - key.destroy() - module.exit_json(changed=True) - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(choices=['present', 'absent'], default='present'), - client_id = dict(aliases=['CLIENT_ID'], no_log=True), - api_key = dict(aliases=['API_KEY'], no_log=True), - name = dict(type='str'), - id = dict(aliases=['droplet_id'], type='int'), - ssh_pub_key = dict(type='str'), - ), - required_one_of = ( - ['id', 'name'], - ), - ) - - try: - core(module) - except TimeoutError as e: - module.fail_json(msg=str(e), id=e.id) - except (DoError, Exception) as e: - module.fail_json(msg=str(e)) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/cloud/docker b/library/cloud/docker deleted file mode 100644 index b831485e525..00000000000 --- a/library/cloud/docker +++ /dev/null @@ -1,848 +0,0 @@ -#!/usr/bin/python - -# (c) 2013, Cove Schneider -# (c) 2014, Joshua Conner -# (c) 2014, Pavel Antonov -# -# This file is part of Ansible, -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -###################################################################### - -DOCUMENTATION = ''' ---- -module: docker -version_added: "1.4" -short_description: manage docker containers -description: - - Manage the life cycle of docker containers. -options: - count: - description: - - Set number of containers to run - required: False - default: 1 - aliases: [] - image: - description: - - Set container image to use - required: true - default: null - aliases: [] - command: - description: - - Set command to run in a container on startup - required: false - default: null - aliases: [] - name: - description: - - Set name for container (used to find single container or to provide links) - required: false - default: null - aliases: [] - version_added: "1.5" - ports: - description: - - Set private to public port mapping specification using docker CLI-style syntax [([:[host_port]])|():][/udp] - required: false - default: null - aliases: [] - version_added: "1.5" - expose: - description: - - Set container ports to expose for port mappings or links. (If the port is already exposed using EXPOSE in a Dockerfile, you don't need to expose it again.) - required: false - default: null - aliases: [] - version_added: "1.5" - publish_all_ports: - description: - - Publish all exposed ports to the host interfaces - required: false - default: false - aliases: [] - version_added: "1.5" - volumes: - description: - - Set volume(s) to mount on the container - required: false - default: null - aliases: [] - volumes_from: - description: - - Set shared volume(s) from another container - required: false - default: null - aliases: [] - links: - description: - - Link container(s) to other container(s) (e.g. 
links=redis,postgresql:db) - required: false - default: null - aliases: [] - version_added: "1.5" - memory_limit: - description: - - Set RAM allocated to container - required: false - default: null - aliases: [] - default: 256MB - docker_url: - description: - - URL of docker host to issue commands to - required: false - default: unix://var/run/docker.sock - aliases: [] - docker_api_version: - description: - - Remote API version to use. This defaults to the current default as specified by docker-py. - required: false - default: docker-py default remote API version - aliases: [] - version_added: "1.8" - username: - description: - - Set remote API username - required: false - default: null - aliases: [] - password: - description: - - Set remote API password - required: false - default: null - aliases: [] - hostname: - description: - - Set container hostname - required: false - default: null - aliases: [] - env: - description: - - Set environment variables (e.g. env="PASSWORD=sEcRe7,WORKERS=4") - required: false - default: null - aliases: [] - dns: - description: - - Set custom DNS servers for the container - required: false - default: null - aliases: [] - detach: - description: - - Enable detached mode on start up, leaves container running in background - required: false - default: true - aliases: [] - state: - description: - - Set the state of the container - required: false - default: present - choices: [ "present", "running", "stopped", "absent", "killed", "restarted" ] - aliases: [] - privileged: - description: - - Set whether the container should run in privileged mode - required: false - default: false - aliases: [] - lxc_conf: - description: - - LXC config parameters, e.g. lxc.aa_profile:unconfined - required: false - default: - aliases: [] - name: - description: - - Set the name of the container (cannot use with count) - required: false - default: null - aliases: [] - version_added: "1.5" - stdin_open: - description: - - Keep stdin open - required: false - default: false - aliases: [] - version_added: "1.6" - tty: - description: - - Allocate a pseudo-tty - required: false - default: false - aliases: [] - version_added: "1.6" - net: - description: - - Set Network mode for the container (bridge, none, container:, host). Requires docker >= 0.11. - required: false - default: false - aliases: [] - version_added: "1.8" - registry: - description: - - The remote registry URL to use for pulling images. 
- required: false - default: '' - aliases: [] - version_added: "1.8" - -author: Cove Schneider, Joshua Conner, Pavel Antonov -requirements: [ "docker-py >= 0.3.0", "docker >= 0.10.0" ] -''' - -EXAMPLES = ''' -Start one docker container running tomcat in each host of the web group and bind tomcat's listening port to 8080 -on the host: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos command="service tomcat6 start" ports=8080 - -The tomcat server's port is NAT'ed to a dynamic port on the host, but you can determine which port the server was -mapped to using docker_containers: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos command="service tomcat6 start" ports=8080 count=5 - - name: Display IP address and port mappings for containers - debug: msg={{inventory_hostname}}:{{item['HostConfig']['PortBindings']['8080/tcp'][0]['HostPort']}} - with_items: docker_containers - -Just as in the previous example, but iterates over the list of docker containers with a sequence: - -- hosts: web - sudo: yes - vars: - start_containers_count: 5 - tasks: - - name: run tomcat servers - docker: image=centos command="service tomcat6 start" ports=8080 count={{start_containers_count}} - - name: Display IP address and port mappings for containers - debug: msg="{{inventory_hostname}}:{{docker_containers[{{item}}]['HostConfig']['PortBindings']['8080/tcp'][0]['HostPort']}}" - with_sequence: start=0 end={{start_containers_count - 1}} - -Stop, remove all of the running tomcat containers and list the exit code from the stopped containers: - -- hosts: web - sudo: yes - tasks: - - name: stop tomcat servers - docker: image=centos command="service tomcat6 start" state=absent - - name: Display return codes from stopped containers - debug: msg="Returned {{inventory_hostname}}:{{item}}" - with_items: docker_containers - -Create a named container: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat server - docker: image=centos name=tomcat command="service tomcat6 start" ports=8080 - -Create multiple named containers: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos name={{item}} command="service tomcat6 start" ports=8080 - with_items: - - crookshank - - snowbell - - heathcliff - - felix - - sylvester - -Create containers named in a sequence: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos name={{item}} command="service tomcat6 start" ports=8080 - with_sequence: start=1 end=5 format=tomcat_%d.example.com - -Create two linked containers: - -- hosts: web - sudo: yes - tasks: - - name: ensure redis container is running - docker: image=crosbymichael/redis name=redis - - - name: ensure redis_ambassador container is running - docker: image=svendowideit/ambassador ports=6379:6379 links=redis:redis name=redis_ambassador_ansible - -Create containers with options specified as key-value pairs and lists: - -- hosts: web - sudo: yes - tasks: - - docker: - image: namespace/image_name - links: - - postgresql:db - - redis:redis - - -Create containers with options specified as strings and lists as comma-separated strings: - -- hosts: web - sudo: yes - tasks: - docker: image=namespace/image_name links=postgresql:db,redis:redis - -Create a container with no networking: - -- hosts: web - sudo: yes - tasks: - docker: image=namespace/image_name net=none - -''' - -HAS_DOCKER_PY = True - -import sys -from urlparse import urlparse -try: - import docker.client - import docker.utils - from 
requests.exceptions import * -except ImportError, e: - HAS_DOCKER_PY = False - -try: - from docker.errors import APIError as DockerAPIError -except ImportError: - from docker.client import APIError as DockerAPIError - - -def _human_to_bytes(number): - suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] - - if isinstance(number, int): - return number - if number[-1] == suffixes[0] and number[-2].isdigit(): - return number[:-1] - - i = 1 - for each in suffixes[1:]: - if number[-len(each):] == suffixes[i]: - return int(number[:-len(each)]) * (1024 ** i) - i = i + 1 - - print "failed=True msg='Could not convert %s to integer'" % (number) - sys.exit(1) - -def _ansible_facts(container_list): - return {"docker_containers": container_list} - -def _docker_id_quirk(inspect): - # XXX: some quirk in docker - if 'ID' in inspect: - inspect['Id'] = inspect['ID'] - del inspect['ID'] - return inspect - -class DockerManager: - - counters = {'created':0, 'started':0, 'stopped':0, 'killed':0, 'removed':0, 'restarted':0, 'pull':0} - - def __init__(self, module): - self.module = module - - self.binds = None - self.volumes = None - if self.module.params.get('volumes'): - self.binds = {} - self.volumes = {} - vols = self.module.params.get('volumes') - for vol in vols: - parts = vol.split(":") - # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container) - if len(parts) == 2: - self.volumes[parts[1]] = {} - self.binds[parts[0]] = parts[1] - # with bind mode - elif len(parts) == 3: - if parts[2] not in ['ro', 'rw']: - self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') - ro = parts[2] == 'ro' - self.volumes[parts[1]] = {} - self.binds[parts[0]] = {'bind': parts[1], 'ro': ro} - # docker mount (e.g. /www, mounts a docker volume /www on the container at the same location) - else: - self.volumes[parts[0]] = {} - - self.lxc_conf = None - if self.module.params.get('lxc_conf'): - self.lxc_conf = [] - options = self.module.params.get('lxc_conf') - for option in options: - parts = option.split(':') - self.lxc_conf.append({"Key": parts[0], "Value": parts[1]}) - - self.exposed_ports = None - if self.module.params.get('expose'): - self.exposed_ports = self.get_exposed_ports(self.module.params.get('expose')) - - self.port_bindings = None - if self.module.params.get('ports'): - self.port_bindings = self.get_port_bindings(self.module.params.get('ports')) - - self.links = None - if self.module.params.get('links'): - self.links = self.get_links(self.module.params.get('links')) - - self.env = self.module.params.get('env', None) - - # connect to docker server - docker_url = urlparse(module.params.get('docker_url')) - docker_api_version = module.params.get('docker_api_version') - self.client = docker.Client(base_url=docker_url.geturl(), version=docker_api_version) - - - def get_links(self, links): - """ - Parse the links passed, if a link is specified without an alias then just create the alias of the same name as the link - """ - processed_links = {} - - for link in links: - parsed_link = link.split(':', 1) - if(len(parsed_link) == 2): - processed_links[parsed_link[0]] = parsed_link[1] - else: - processed_links[parsed_link[0]] = parsed_link[0] - - return processed_links - - - def get_exposed_ports(self, expose_list): - """ - Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax. 
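- Illustrative example (editor's addition): ['8080', '53/udp'] -> [('8080', 'tcp'), ('53', 'udp')]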
- """ - if expose_list: - exposed = [] - for port in expose_list: - port = port.strip() - if port.endswith('/tcp') or port.endswith('/udp'): - port_with_proto = tuple(port.split('/')) - else: - # assume tcp protocol if not specified - port_with_proto = (port, 'tcp') - exposed.append(port_with_proto) - return exposed - else: - return None - - - def get_port_bindings(self, ports): - """ - Parse the `ports` string into a port bindings dict for the `start_container` call. - """ - binds = {} - for port in ports: - # ports could potentially be an array like [80, 443], so we make sure they're strings - # before splitting - parts = str(port).split(':') - container_port = parts[-1] - if '/' not in container_port: - container_port = int(parts[-1]) - - p_len = len(parts) - if p_len == 1: - # Bind `container_port` of the container to a dynamically - # allocated TCP port on all available interfaces of the host - # machine. - bind = ('0.0.0.0',) - elif p_len == 2: - # Bind `container_port` of the container to port `parts[0]` on - # all available interfaces of the host machine. - bind = ('0.0.0.0', int(parts[0])) - elif p_len == 3: - # Bind `container_port` of the container to port `parts[1]` on - # IP `parts[0]` of the host machine. If `parts[1]` empty bind - # to a dynamically allocacted port of IP `parts[0]`. - bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],) - - if container_port in binds: - old_bind = binds[container_port] - if isinstance(old_bind, list): - # append to list if it already exists - old_bind.append(bind) - else: - # otherwise create list that contains the old and new binds - binds[container_port] = [binds[container_port], bind] - else: - binds[container_port] = bind - - return binds - - - def get_split_image_tag(self, image): - # If image contains a host or org name, omit that from our check - resource = image.rsplit('/', 1)[-1] - - # now we can determine if image has a tag - if resource.find(':') > 0: - # Use image here so that host and org name are included - return image.split(':', 1) - else: - tag = "latest" - return image, tag - - def get_summary_counters_msg(self): - msg = "" - for k, v in self.counters.iteritems(): - msg = msg + "%s %d " % (k, v) - - return msg - - def increment_counter(self, name): - self.counters[name] = self.counters[name] + 1 - - def has_changed(self): - for k, v in self.counters.iteritems(): - if v > 0: - return True - - return False - - def get_inspect_containers(self, containers): - inspect = [] - for i in containers: - details = self.client.inspect_container(i['Id']) - details = _docker_id_quirk(details) - inspect.append(details) - - return inspect - - def get_deployed_containers(self): - """determine which images/commands are running already""" - image = self.module.params.get('image') - command = self.module.params.get('command') - if command: - command = command.strip() - name = self.module.params.get('name') - if name and not name.startswith('/'): - name = '/' + name - deployed = [] - - # if we weren't given a tag with the image, we need to only compare on the image name, as that - # docker will give us back the full image name including a tag in the container list if one exists. 
- image, tag = self.get_split_image_tag(image) - - for i in self.client.containers(all=True): - running_image, running_tag = self.get_split_image_tag(i['Image']) - running_command = i['Command'].strip() - - name_matches = False - if i["Names"]: - name_matches = (name and name in i['Names']) - image_matches = (running_image == image) - tag_matches = (not tag or running_tag == tag) - # if a container has an entrypoint, `command` will actually equal - # '{} {}'.format(entrypoint, command) - command_matches = (not command or running_command.endswith(command)) - - if name_matches or (name is None and image_matches and tag_matches and command_matches): - details = self.client.inspect_container(i['Id']) - details = _docker_id_quirk(details) - deployed.append(details) - - return deployed - - def get_running_containers(self): - running = [] - for i in self.get_deployed_containers(): - if i['State']['Running'] == True and i['State'].get('Ghost', False) == False: - running.append(i) - - return running - - def create_containers(self, count=1): - params = {'image': self.module.params.get('image'), - 'command': self.module.params.get('command'), - 'ports': self.exposed_ports, - 'volumes': self.volumes, - 'mem_limit': _human_to_bytes(self.module.params.get('memory_limit')), - 'environment': self.env, - 'hostname': self.module.params.get('hostname'), - 'detach': self.module.params.get('detach'), - 'name': self.module.params.get('name'), - 'stdin_open': self.module.params.get('stdin_open'), - 'tty': self.module.params.get('tty'), - } - - if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) < 0: - params['dns'] = self.module.params.get('dns') - params['volumes_from'] = self.module.params.get('volumes_from') - - def do_create(count, params): - results = [] - for _ in range(count): - result = self.client.create_container(**params) - self.increment_counter('created') - results.append(result) - - return results - - try: - containers = do_create(count, params) - except: - resource = self.module.params.get('image') - image, tag = self.get_split_image_tag(resource) - if self.module.params.get('username'): - try: - self.client.login( - self.module.params.get('username'), - password=self.module.params.get('password'), - email=self.module.params.get('email'), - registry=self.module.params.get('registry') - ) - except: - self.module.fail_json(msg="failed to login to the remote registry, check your username/password.") - try: - self.client.pull(image, tag=tag) - except: - self.module.fail_json(msg="failed to pull the specified image: %s" % resource) - self.increment_counter('pull') - containers = do_create(count, params) - - return containers - - def start_containers(self, containers): - params = { - 'lxc_conf': self.lxc_conf, - 'binds': self.binds, - 'port_bindings': self.port_bindings, - 'publish_all_ports': self.module.params.get('publish_all_ports'), - 'privileged': self.module.params.get('privileged'), - 'links': self.links, - 'network_mode': self.module.params.get('net'), - } - if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) >= 0 and hasattr(docker, '__version__') and docker.__version__ > '0.3.0': - params['dns'] = self.module.params.get('dns') - params['volumes_from'] = self.module.params.get('volumes_from') - - for i in containers: - self.client.start(i['Id'], **params) - self.increment_counter('started') - - def stop_containers(self, containers): - for i in containers: - self.client.stop(i['Id']) - self.increment_counter('stopped') - - return 
[self.client.wait(i['Id']) for i in containers] - - def remove_containers(self, containers): - for i in containers: - self.client.remove_container(i['Id']) - self.increment_counter('removed') - - def kill_containers(self, containers): - for i in containers: - self.client.kill(i['Id']) - self.increment_counter('killed') - - def restart_containers(self, containers): - for i in containers: - self.client.restart(i['Id']) - self.increment_counter('restarted') - - -def check_dependencies(module): - """ - Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a - helpful error message if it isn't. - """ - if not HAS_DOCKER_PY: - module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.") - else: - HAS_NEW_ENOUGH_DOCKER_PY = False - if hasattr(docker, '__version__'): - # a '__version__' attribute was added to the module but not until - # after 0.3.0 was pushed to pip. If it's there, use it. - if docker.__version__ >= '0.3.0': - HAS_NEW_ENOUGH_DOCKER_PY = True - else: - # HACK: if '__version__' isn't there, we check for the existence of - # `_get_raw_response_socket` in the docker.Client class, which was - # added in 0.3.0 - if hasattr(docker.Client, '_get_raw_response_socket'): - HAS_NEW_ENOUGH_DOCKER_PY = True - - if not HAS_NEW_ENOUGH_DOCKER_PY: - module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.") - - -def main(): - module = AnsibleModule( - argument_spec = dict( - count = dict(default=1), - image = dict(required=True), - command = dict(required=False, default=None), - expose = dict(required=False, default=None, type='list'), - ports = dict(required=False, default=None, type='list'), - publish_all_ports = dict(default=False, type='bool'), - volumes = dict(default=None, type='list'), - volumes_from = dict(default=None), - links = dict(default=None, type='list'), - memory_limit = dict(default=0), - memory_swap = dict(default=0), - docker_url = dict(default='unix://var/run/docker.sock'), - docker_api_version = dict(default=docker.client.DEFAULT_DOCKER_API_VERSION), - username = dict(default=None), - password = dict(), - email = dict(), - registry = dict(), - hostname = dict(default=None), - env = dict(type='dict'), - dns = dict(), - detach = dict(default=True, type='bool'), - state = dict(default='running', choices=['absent', 'present', 'running', 'stopped', 'killed', 'restarted']), - debug = dict(default=False, type='bool'), - privileged = dict(default=False, type='bool'), - stdin_open = dict(default=False, type='bool'), - tty = dict(default=False, type='bool'), - lxc_conf = dict(default=None, type='list'), - name = dict(default=None), - net = dict(default=None) - ) - ) - - check_dependencies(module) - - try: - manager = DockerManager(module) - state = module.params.get('state') - count = int(module.params.get('count')) - name = module.params.get('name') - image = module.params.get('image') - - if count < 0: - module.fail_json(msg="Count must be greater than or equal to zero") - if count > 1 and name: - module.fail_json(msg="Count and name must not be used together") - - running_containers = manager.get_running_containers() - running_count = len(running_containers) - delta = count - running_count - deployed_containers = manager.get_deployed_containers() - facts = None - failed = False - changed = False - - # start/stop containers - if state in [ "running", "present" ]: - - # make sure a container with `name` exists, if not create and start it - if name: - # first determine if a container with this name 
exists - existing_container = None - for deployed_container in deployed_containers: - if deployed_container.get('Name') == '/%s' % name: - existing_container = deployed_container - break - - # the named container is running, but with a - # different image or tag, so we stop it first - if existing_container and existing_container.get('Config', dict()).get('Image') != image: - manager.stop_containers([existing_container]) - manager.remove_containers([existing_container]) - running_containers = manager.get_running_containers() - deployed_containers = manager.get_deployed_containers() - existing_container = None - - # if the container isn't running (or if we stopped the - # old version above), create and (maybe) start it up now - if not existing_container: - containers = manager.create_containers(1) - if state == "present": # otherwise it gets (re)started later anyway - manager.start_containers(containers) - running_containers = manager.get_running_containers() - deployed_containers = manager.get_deployed_containers() - - if state == "running": - # make sure a container with `name` is running - if name and "/" + name not in map(lambda x: x.get('Name'), running_containers): - manager.start_containers(deployed_containers) - - # start more containers if we don't have enough - elif delta > 0: - containers = manager.create_containers(delta) - manager.start_containers(containers) - - # stop containers if we have too many - elif delta < 0: - containers_to_stop = running_containers[0:abs(delta)] - containers = manager.stop_containers(containers_to_stop) - manager.remove_containers(containers_to_stop) - - facts = manager.get_running_containers() - else: - facts = manager.get_deployed_containers() - - # stop and remove containers - elif state == "absent": - facts = manager.stop_containers(deployed_containers) - manager.remove_containers(deployed_containers) - - # stop containers - elif state == "stopped": - facts = manager.stop_containers(running_containers) - - # kill containers - elif state == "killed": - manager.kill_containers(running_containers) - - # restart containers - elif state == "restarted": - manager.restart_containers(running_containers) - facts = manager.get_inspect_containers(running_containers) - - msg = "%s container(s) running image %s with command %s" % \ - (manager.get_summary_counters_msg(), module.params.get('image'), module.params.get('command')) - changed = manager.has_changed() - - module.exit_json(failed=failed, changed=changed, msg=msg, ansible_facts=_ansible_facts(facts)) - - except DockerAPIError, e: - changed = manager.has_changed() - module.exit_json(failed=True, changed=changed, msg="Docker API error: " + e.explanation) - - except RequestException, e: - changed = manager.has_changed() - module.exit_json(failed=True, changed=changed, msg=repr(e)) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/cloud/docker_image b/library/cloud/docker_image deleted file mode 100644 index e1388f20f1a..00000000000 --- a/library/cloud/docker_image +++ /dev/null @@ -1,252 +0,0 @@ -#!/usr/bin/python -# - -# (c) 2014, Pavel Antonov -# -# This file is part of Ansible -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version.
-# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -###################################################################### - -DOCUMENTATION = ''' ---- -module: docker_image -author: Pavel Antonov -version_added: "1.5" -short_description: manage docker images -description: - - Create, check and remove docker images -options: - path: - description: - - Path to directory with Dockerfile - required: false - default: null - aliases: [] - name: - description: - - Image name to work with - required: true - default: null - aliases: [] - tag: - description: - - Image tag to work with - required: false - default: "latest" - aliases: [] - nocache: - description: - - Do not use cache when building - required: false - default: false - aliases: [] - docker_url: - description: - - URL of docker host to issue commands to - required: false - default: unix://var/run/docker.sock - aliases: [] - state: - description: - - Set the state of the image - required: false - default: present - choices: [ "present", "absent", "build" ] - aliases: [] - timeout: - description: - - Set image operation timeout - required: false - default: 600 - aliases: [] -requirements: [ "docker-py" ] -''' - -EXAMPLES = ''' -Build docker image if required. Path should contain a Dockerfile to build the image: - -- hosts: web - sudo: yes - tasks: - - name: check or build image - docker_image: path="/path/to/build/dir" name="my/app" state=present - -Build new version of image: - -- hosts: web - sudo: yes - tasks: - - name: check or build image - docker_image: path="/path/to/build/dir" name="my/app" state=build - -Remove image from local docker storage: - -- hosts: web - sudo: yes - tasks: - - name: remove image - docker_image: name="my/app" state=absent - -''' - -try: - import sys - import re - import json - import docker.client - from requests.exceptions import * - from urlparse import urlparse -except ImportError, e: - print "failed=True msg='failed to import python module: %s'" % e - sys.exit(1) - -try: - from docker.errors import APIError as DockerAPIError -except ImportError: - from docker.client import APIError as DockerAPIError - -class DockerImageManager: - - def __init__(self, module): - self.module = module - self.path = self.module.params.get('path') - self.name = self.module.params.get('name') - self.tag = self.module.params.get('tag') - self.nocache = self.module.params.get('nocache') - docker_url = urlparse(module.params.get('docker_url')) - self.client = docker.Client(base_url=docker_url.geturl(), timeout=module.params.get('timeout')) - self.changed = False - self.log = [] - self.error_msg = None - - def get_log(self, as_string=True): - return "".join(self.log) if as_string else self.log - - def build(self): - stream = self.client.build(self.path, tag=':'.join([self.name, self.tag]), nocache=self.nocache, rm=True, stream=True) - success_search = r'Successfully built ([0-9a-f]+)' - image_id = None - self.changed = True - - for chunk in stream: - if not chunk: - continue - - try: - chunk_json = json.loads(chunk) - except ValueError: - continue - - if 'error' in chunk_json: - self.error_msg = chunk_json['error'] - return None - - if 'stream' in chunk_json: - output = chunk_json['stream'] - self.log.append(output) - match = 
re.search(success_search, output) - if match: - image_id = match.group(1) - - # Just in case we skipped evaluating the JSON returned from build - # during every iteration, add an error if the image_id was never - # populated - if not image_id: - self.error_msg = 'Unknown error encountered' - - return image_id - - def has_changed(self): - return self.changed - - def get_images(self): - filtered_images = [] - images = self.client.images() - for i in images: - # Docker-py version >= 0.3 (Docker API >= 1.8) - if 'RepoTags' in i: - repotag = ':'.join([self.name, self.tag]) - if not self.name or repotag in i['RepoTags']: - filtered_images.append(i) - # Docker-py version < 0.3 (Docker API < 1.8) - elif (not self.name or self.name == i['Repository']) and (not self.tag or self.tag == i['Tag']): - filtered_images.append(i) - return filtered_images - - def remove_images(self): - images = self.get_images() - for i in images: - try: - self.client.remove_image(i['Id']) - self.changed = True - except DockerAPIError as e: - # image can be removed by docker if not used - pass - - -def main(): - module = AnsibleModule( - argument_spec = dict( - path = dict(required=False, default=None), - name = dict(required=True), - tag = dict(required=False, default="latest"), - nocache = dict(default=False, type='bool'), - state = dict(default='present', choices=['absent', 'present', 'build']), - docker_url = dict(default='unix://var/run/docker.sock'), - timeout = dict(default=600, type='int'), - ) - ) - - try: - manager = DockerImageManager(module) - state = module.params.get('state') - failed = False - image_id = None - msg = '' - do_build = False - - # build image if not exists - if state == "present": - images = manager.get_images() - if len(images) == 0: - do_build = True - # build image - elif state == "build": - do_build = True - # remove image or images - elif state == "absent": - manager.remove_images() - - if do_build: - image_id = manager.build() - if image_id: - msg = "Image built: %s" % image_id - else: - failed = True - msg = "Error: %s\nLog:%s" % (manager.error_msg, manager.get_log()) - - module.exit_json(failed=failed, changed=manager.has_changed(), msg=msg, image_id=image_id) - - except DockerAPIError as e: - module.exit_json(failed=True, changed=manager.has_changed(), msg="Docker API error: " + e.explanation) - - except RequestException as e: - module.exit_json(failed=True, changed=manager.has_changed(), msg=repr(e)) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/cloud/ec2 b/library/cloud/ec2 deleted file mode 100644 index cb14d9b851f..00000000000 --- a/library/cloud/ec2 +++ /dev/null @@ -1,1195 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ec2 -short_description: create, terminate, start or stop an instance in ec2, return instanceid -description: - - Creates or terminates ec2 instances. 
When created, optionally waits for it to be 'running'. This module has a dependency on python-boto >= 2.5 -version_added: "0.9" -options: - key_name: - description: - - key pair to use on the instance - required: false - default: null - aliases: ['keypair'] - id: - description: - - identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). - required: false - default: null - aliases: [] - group: - description: - - security group (or list of groups) to use with the instance - required: false - default: null - aliases: [ 'groups' ] - group_id: - version_added: "1.1" - description: - - security group id (or list of ids) to use with the instance - required: false - default: null - aliases: [] - region: - version_added: "1.2" - description: - - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - default: null - aliases: [ 'aws_region', 'ec2_region' ] - zone: - version_added: "1.2" - description: - - AWS availability zone in which to launch the instance - required: false - default: null - aliases: [ 'aws_zone', 'ec2_zone' ] - instance_type: - description: - - instance type to use for the instance - required: true - default: null - aliases: [] - spot_price: - version_added: "1.5" - description: - - Maximum spot price to bid. If not set, a regular on-demand instance is requested. A spot request is made with this maximum bid. When it is filled, the instance is started.
- required: false - default: null - aliases: [] - image: - description: - - I(emi) (or I(ami)) to use for the instance - required: true - default: null - aliases: [] - kernel: - description: - - kernel I(eki) to use for the instance - required: false - default: null - aliases: [] - ramdisk: - description: - - ramdisk I(eri) to use for the instance - required: false - default: null - aliases: [] - wait: - description: - - wait for the instance to be in state 'running' before returning - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 - aliases: [] - spot_wait_timeout: - version_added: "1.5" - description: - - how long to wait for the spot instance request to be fulfilled - default: 600 - aliases: [] - count: - description: - - number of instances to launch - required: False - default: 1 - aliases: [] - monitoring: - version_added: "1.1" - description: - - enable detailed monitoring (CloudWatch) for instance - required: false - default: null - aliases: [] - user_data: - version_added: "0.9" - description: - - opaque blob of data which is made available to the ec2 instance - required: false - default: null - aliases: [] - instance_tags: - version_added: "1.0" - description: - - a hash/dictionary of tags to add to the new instance; '{"key":"value"}' and '{"key":"value","key":"value"}' - required: false - default: null - aliases: [] - placement_group: - version_added: "1.3" - description: - - placement group for the instance when using EC2 Clustered Compute - required: false - default: null - aliases: [] - vpc_subnet_id: - version_added: "1.1" - description: - - the subnet ID in which to launch the instance (VPC) - required: false - default: null - aliases: [] - assign_public_ip: - version_added: "1.5" - description: - - when provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+ - required: false - default: null - aliases: [] - private_ip: - version_added: "1.2" - description: - - the private ip address to assign the instance (from the vpc subnet) - required: false - default: null - aliases: [] - instance_profile_name: - version_added: "1.3" - description: - - Name of the IAM instance profile to use. Boto library must be 2.5.0+ - required: false - default: null - aliases: [] - instance_ids: - version_added: "1.3" - description: - - "list of instance ids, currently used for states: absent, running, stopped" - required: false - default: null - aliases: [] - source_dest_check: - version_added: "1.6" - description: - - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers) - required: false - default: true - state: - version_added: "1.3" - description: - - create or terminate instances - required: false - default: 'present' - aliases: [] - choices: ['present', 'absent', 'running', 'stopped'] - volumes: - version_added: "1.5" - description: - - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. 
- required: false - default: null - aliases: [] - ebs_optimized: - version_added: "1.6" - description: - - whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) - required: false - default: false - exact_count: - version_added: "1.5" - description: - - An integer value which indicates how many instances that match the 'count_tag' parameter should be running. Instances are either created or terminated based on this value. - required: false - default: null - aliases: [] - count_tag: - version_added: "1.5" - description: - - Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running. This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver". - required: false - default: null - aliases: [] - -author: Seth Vidal, Tim Gerla, Lester Wade -extends_documentation_fragment: aws -''' - -EXAMPLES = ''' -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. - -# Basic provisioning example -- local_action: - module: ec2 - key_name: mykey - instance_type: c1.medium - image: emi-40603AD1 - wait: yes - group: webserver - count: 3 - -# Advanced example with tagging and CloudWatch -- local_action: - module: ec2 - key_name: mykey - group: databases - instance_type: m1.large - image: ami-6e649707 - wait: yes - wait_timeout: 500 - count: 5 - instance_tags: - db: postgres - monitoring: yes - -# Single instance with additional IOPS volume from snapshot and volume delete on termination -local_action: - module: ec2 - key_name: mykey - group: webserver - instance_type: m1.large - image: ami-6e649707 - wait: yes - wait_timeout: 500 - volumes: - - device_name: /dev/sdb - snapshot: snap-abcdef12 - device_type: io1 - iops: 1000 - volume_size: 100 - delete_on_termination: true - monitoring: yes - -# Multiple groups example -local_action: - module: ec2 - key_name: mykey - group: ['databases', 'internal-services', 'sshable', 'and-so-forth'] - instance_type: m1.large - image: ami-6e649707 - wait: yes - wait_timeout: 500 - count: 5 - instance_tags: - db: postgres - monitoring: yes - -# Multiple instances with additional volume from snapshot -local_action: - module: ec2 - key_name: mykey - group: webserver - instance_type: m1.large - image: ami-6e649707 - wait: yes - wait_timeout: 500 - count: 5 - volumes: - - device_name: /dev/sdb - snapshot: snap-abcdef12 - volume_size: 10 - monitoring: yes - -# VPC example -- local_action: - module: ec2 - key_name: mykey - group_id: sg-1dc53f72 - instance_type: m1.small - image: ami-6e649707 - wait: yes - vpc_subnet_id: subnet-29e63245 - assign_public_ip: yes - -# Spot instance example -- local_action: - module: ec2 - spot_price: 0.24 - spot_wait_timeout: 600 - keypair: mykey - group_id: sg-1dc53f72 - instance_type: m1.small - image: ami-6e649707 - wait: yes - vpc_subnet_id: subnet-29e63245 - assign_public_ip: yes - -# Launch instances, runs some tasks -# and then terminate them - - -- name: Create a sandbox instance - hosts: localhost - gather_facts: False - vars: - key_name: my_keypair - instance_type: m1.small - security_group: my_securitygroup - image: my_ami_id - region: us-east-1 - tasks: - - name: Launch instance - local_action: ec2 key_name={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image }} wait=true region={{ region }} - register: ec2 - - 
name: Add new instance to host group - local_action: add_host hostname={{ item.public_ip }} groupname=launched - with_items: ec2.instances - - name: Wait for SSH to come up - local_action: wait_for host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started - with_items: ec2.instances - -- name: Configure instance(s) - hosts: launched - sudo: True - gather_facts: True - roles: - - my_awesome_role - - my_awesome_test - -- name: Terminate instances - hosts: localhost - connection: local - tasks: - - name: Terminate instances that were previously launched - local_action: - module: ec2 - state: 'absent' - instance_ids: '{{ ec2.instance_ids }}' - -# Start a few existing instances, run some tasks -# and stop the instances - -- name: Start sandbox instances - hosts: localhost - gather_facts: false - connection: local - vars: - instance_ids: - - 'i-xxxxxx' - - 'i-xxxxxx' - - 'i-xxxxxx' - region: us-east-1 - tasks: - - name: Start the sandbox instances - local_action: - module: ec2 - instance_ids: '{{ instance_ids }}' - region: '{{ region }}' - state: running - wait: True - roles: - - do_neat_stuff - - do_more_neat_stuff - -- name: Stop sandbox instances - hosts: localhost - gather_facts: false - connection: local - vars: - instance_ids: - - 'i-xxxxxx' - - 'i-xxxxxx' - - 'i-xxxxxx' - region: us-east-1 - tasks: - - name: Stop the sandbox instances - local_action: - module: ec2 - instance_ids: '{{ instance_ids }}' - region: '{{ region }}' - state: stopped - wait: True - -# -# Enforce that 5 instances with a tag "foo" are running -# - -- local_action: - module: ec2 - key_name: mykey - instance_type: c1.medium - image: emi-40603AD1 - wait: yes - group: webserver - instance_tags: - foo: bar - exact_count: 5 - count_tag: foo - -# -# Enforce that 5 instances named "database" with a "dbtype" of "postgres" are running -# - -- local_action: - module: ec2 - key_name: mykey - instance_type: c1.medium - image: emi-40603AD1 - wait: yes - group: webserver - instance_tags: - Name: database - dbtype: postgres - exact_count: 5 - count_tag: - Name: database - dbtype: postgres - -# -# count_tag complex argument examples -# - - # instances with tag foo - count_tag: - foo: - - # instances with tag foo=bar - count_tag: - foo: bar - - # instances with tags foo=bar & baz - count_tag: - foo: bar - baz: - - # instances with tags foo & bar & baz=bang - count_tag: - - foo - - bar - - baz: bang - -''' - -import sys -import time -from ast import literal_eval - -try: - import boto.ec2 - from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping - from boto.exception import EC2ResponseError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def find_running_instances_by_count_tag(module, ec2, count_tag): - - # get reservations for instances that match tag(s) and are running - reservations = get_reservations(module, ec2, tags=count_tag, state="running") - - instances = [] - for res in reservations: - if hasattr(res, 'instances'): - for inst in res.instances: - instances.append(inst) - - return reservations, instances - - -def _set_none_to_blank(dictionary): - result = dictionary - for k in result.iterkeys(): - if type(result[k]) == dict: - result[k] = _set_none_to_blank(result[k]) - elif not result[k]: - result[k] = "" - return result - - -def get_reservations(module, ec2, tags=None, state=None): - - # TODO: filters do not work with tags that have underscores - filters = dict() - - if tags is not None: - - if type(tags) is str: - try: - tags = literal_eval(tags)
- except: - pass - - # if string, we only care that a tag of that name exists - if type(tags) is str: - filters.update({"tag-key": tags}) - - # if list, append each item to filters - if type(tags) is list: - for x in tags: - if type(x) is dict: - x = _set_none_to_blank(x) - filters.update(dict(("tag:"+tn, tv) for (tn,tv) in x.iteritems())) - else: - filters.update({"tag-key": x}) - - # if dict, add the key and value to the filter - if type(tags) is dict: - tags = _set_none_to_blank(tags) - filters.update(dict(("tag:"+tn, tv) for (tn,tv) in tags.iteritems())) - - if state: - # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api - filters.update({'instance-state-name': state}) - - results = ec2.get_all_instances(filters=filters) - - return results - -def get_instance_info(inst): - """ - Retrieves instance information from an instance - ID and returns it as a dictionary - """ - instance_info = {'id': inst.id, - 'ami_launch_index': inst.ami_launch_index, - 'private_ip': inst.private_ip_address, - 'private_dns_name': inst.private_dns_name, - 'public_ip': inst.ip_address, - 'dns_name': inst.dns_name, - 'public_dns_name': inst.public_dns_name, - 'state_code': inst.state_code, - 'architecture': inst.architecture, - 'image_id': inst.image_id, - 'key_name': inst.key_name, - 'placement': inst.placement, - 'region': inst.placement[:-1], - 'kernel': inst.kernel, - 'ramdisk': inst.ramdisk, - 'launch_time': inst.launch_time, - 'instance_type': inst.instance_type, - 'root_device_type': inst.root_device_type, - 'root_device_name': inst.root_device_name, - 'state': inst.state, - 'hypervisor': inst.hypervisor} - try: - instance_info['virtualization_type'] = getattr(inst,'virtualization_type') - except AttributeError: - instance_info['virtualization_type'] = None - - try: - instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized') - except AttributeError: - instance_info['ebs_optimized'] = False - - return instance_info - -def boto_supports_associate_public_ip_address(ec2): - """ - Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification - class. Added in Boto 2.13.0 - - ec2: authenticated ec2 connection object - - Returns: - True if Boto library accepts associate_public_ip_address argument, else false - """ - - try: - network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification() - getattr(network_interface, "associate_public_ip_address") - return True - except AttributeError: - return False - -def boto_supports_profile_name_arg(ec2): - """ - Check if Boto library has instance_profile_name argument. 
instance_profile_name has been added in Boto 2.5.0 - - ec2: authenticated ec2 connection object - - Returns: - True if Boto library accepts instance_profile_name argument, else False - """ - run_instances_method = getattr(ec2, 'run_instances') - return 'instance_profile_name' in run_instances_method.func_code.co_varnames - -def create_block_device(module, ec2, volume): - # Not aware of a way to determine this programmatically - # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/ - MAX_IOPS_TO_SIZE_RATIO = 30 - if 'snapshot' not in volume and 'ephemeral' not in volume: - if 'volume_size' not in volume: - module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume') - if 'snapshot' in volume: - if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume: - module.fail_json(msg = 'io1 volumes must have an iops value set') - if 'iops' in volume: - snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0] - size = volume.get('volume_size', snapshot.volume_size) - if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size: - module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO) - if 'ephemeral' in volume: - if 'snapshot' in volume: - module.fail_json(msg = 'Cannot set both ephemeral and snapshot') - return BlockDeviceType(snapshot_id=volume.get('snapshot'), - ephemeral_name=volume.get('ephemeral'), - size=volume.get('volume_size'), - volume_type=volume.get('device_type'), - delete_on_termination=volume.get('delete_on_termination', False), - iops=volume.get('iops')) - -def boto_supports_param_in_spot_request(ec2, param): - """ - Check if Boto library has a given parameter in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
- - ec2: authenticated ec2 connection object - - Returns: - True if boto library has the named param as an argument on the request_spot_instances method, else False - """ - method = getattr(ec2, 'request_spot_instances') - return param in method.func_code.co_varnames - -def enforce_count(module, ec2): - - exact_count = module.params.get('exact_count') - count_tag = module.params.get('count_tag') - - reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag) - - changed = None - checkmode = False - instance_dict_array = [] - changed_instance_ids = None - - if len(instances) == exact_count: - changed = False - elif len(instances) < exact_count: - changed = True - to_create = exact_count - len(instances) - if not checkmode: - (instance_dict_array, changed_instance_ids, changed) \ - = create_instances(module, ec2, override_count=to_create) - - for inst in instance_dict_array: - instances.append(inst) - elif len(instances) > exact_count: - changed = True - to_remove = len(instances) - exact_count - if not checkmode: - all_instance_ids = sorted([ x.id for x in instances ]) - remove_ids = all_instance_ids[0:to_remove] - - instances = [ x for x in instances if x.id not in remove_ids] - - (changed, instance_dict_array, changed_instance_ids) \ - = terminate_instances(module, ec2, remove_ids) - terminated_list = [] - for inst in instance_dict_array: - inst['state'] = "terminated" - terminated_list.append(inst) - instance_dict_array = terminated_list - - # ensure all instances are dictionaries - all_instances = [] - for inst in instances: - if type(inst) is not dict: - inst = get_instance_info(inst) - all_instances.append(inst) - - return (all_instances, instance_dict_array, changed_instance_ids, changed) - - -def create_instances(module, ec2, override_count=None): - """ - Creates new instances - - module : AnsibleModule object - ec2: authenticated ec2 connection object - - Returns: - A list of dictionaries with instance information - about the instances that were launched - """ - - key_name = module.params.get('key_name') - id = module.params.get('id') - group_name = module.params.get('group') - group_id = module.params.get('group_id') - zone = module.params.get('zone') - instance_type = module.params.get('instance_type') - spot_price = module.params.get('spot_price') - image = module.params.get('image') - if override_count: - count = override_count - else: - count = module.params.get('count') - monitoring = module.params.get('monitoring') - kernel = module.params.get('kernel') - ramdisk = module.params.get('ramdisk') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - spot_wait_timeout = int(module.params.get('spot_wait_timeout')) - placement_group = module.params.get('placement_group') - user_data = module.params.get('user_data') - instance_tags = module.params.get('instance_tags') - vpc_subnet_id = module.params.get('vpc_subnet_id') - assign_public_ip = module.boolean(module.params.get('assign_public_ip')) - private_ip = module.params.get('private_ip') - instance_profile_name = module.params.get('instance_profile_name') - volumes = module.params.get('volumes') - ebs_optimized = module.params.get('ebs_optimized') - exact_count = module.params.get('exact_count') - count_tag = module.params.get('count_tag') - source_dest_check = module.boolean(module.params.get('source_dest_check')) - - # group_id and group_name are exclusive of each other - if group_id and group_name: - module.fail_json(msg = str("Use only one type of parameter 
(group_name) or (group_id)")) - sys.exit(1) - - try: - # Here we try to lookup the group id from the security group name - if group is set. - if group_name: - grp_details = ec2.get_all_security_groups() - if type(group_name) == list: - group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] - elif type(group_name) == str: - for grp in grp_details: - if str(group_name) in str(grp): - group_id = [str(grp.id)] - group_name = [group_name] - # Now we try to lookup the group id testing if group exists. - elif group_id: - # wrap the group_id in a list if it's not one already - if type(group_id) == str: - group_id = [group_id] - grp_details = ec2.get_all_security_groups(group_ids=group_id) - grp_item = grp_details[0] - group_name = [grp_item.name] - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - - # Look up any instances that match our run id. - - running_instances = [] - count_remaining = int(count) - - if id != None: - filter_dict = {'client-token':id, 'instance-state-name' : 'running'} - previous_reservations = ec2.get_all_instances(None, filter_dict) - for res in previous_reservations: - for prev_instance in res.instances: - running_instances.append(prev_instance) - count_remaining = count_remaining - len(running_instances) - - # Both min_count and max_count equal the count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want. - - if count_remaining == 0: - changed = False - else: - changed = True - try: - params = {'image_id': image, - 'key_name': key_name, - 'monitoring_enabled': monitoring, - 'placement': zone, - 'instance_type': instance_type, - 'kernel_id': kernel, - 'ramdisk_id': ramdisk, - 'user_data': user_data} - - if ebs_optimized: - params['ebs_optimized'] = ebs_optimized - - if boto_supports_profile_name_arg(ec2): - params['instance_profile_name'] = instance_profile_name - else: - if instance_profile_name is not None: - module.fail_json( - msg="instance_profile_name parameter requires Boto version 2.5.0 or higher") - - if assign_public_ip: - if not boto_supports_associate_public_ip_address(ec2): - module.fail_json( - msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.") - elif not vpc_subnet_id: - module.fail_json( - msg="assign_public_ip only available with vpc_subnet_id") - - else: - if private_ip: - interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( - subnet_id=vpc_subnet_id, - private_ip_address=private_ip, - groups=group_id, - associate_public_ip_address=assign_public_ip) - else: - interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( - subnet_id=vpc_subnet_id, - groups=group_id, - associate_public_ip_address=assign_public_ip) - interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) - params['network_interfaces'] = interfaces - else: - params['subnet_id'] = vpc_subnet_id - if vpc_subnet_id: - params['security_group_ids'] = group_id - else: - params['security_groups'] = group_name - - if volumes: - bdm = BlockDeviceMapping() - for volume in volumes: - if 'device_name' not in volume: - module.fail_json(msg = 'Device name must be set for volume') - # Minimum volume size is 1GB. 
We'll use volume size explicitly set to 0 - # to be a signal not to create this volume - if 'volume_size' not in volume or int(volume['volume_size']) > 0: - bdm[volume['device_name']] = create_block_device(module, ec2, volume) - - params['block_device_map'] = bdm - - # check to see if we're using spot pricing first before starting instances - if not spot_price: - if assign_public_ip and private_ip: - params.update(dict( - min_count = count_remaining, - max_count = count_remaining, - client_token = id, - placement_group = placement_group, - )) - else: - params.update(dict( - min_count = count_remaining, - max_count = count_remaining, - client_token = id, - placement_group = placement_group, - private_ip_address = private_ip, - )) - - res = ec2.run_instances(**params) - instids = [ i.id for i in res.instances ] - while True: - try: - ec2.get_all_instances(instids) - break - except boto.exception.EC2ResponseError as e: - if "InvalidInstanceID.NotFound" in str(e): - # there's a race between starting an instance and being able to fetch it - continue - else: - module.fail_json(msg = str(e)) - else: - if private_ip: - module.fail_json( - msg='private_ip only available with on-demand (non-spot) instances') - if boto_supports_param_in_spot_request(ec2, 'placement_group'): - params['placement_group'] = placement_group - elif placement_group: - module.fail_json( - msg="placement_group parameter requires Boto version 2.3.0 or higher.") - - params.update(dict( - count = count_remaining, - )) - res = ec2.request_spot_instances(spot_price, **params) - - # Now we have to do the intermediate waiting - if wait: - spot_req_inst_ids = dict() - spot_wait_timeout = time.time() + spot_wait_timeout - while spot_wait_timeout > time.time(): - reqs = ec2.get_all_spot_instance_requests() - for sirb in res: - if sirb.id in spot_req_inst_ids: - continue - for sir in reqs: - if sir.id == sirb.id and sir.instance_id is not None: - spot_req_inst_ids[sirb.id] = sir.instance_id - if len(spot_req_inst_ids) < count: - time.sleep(5) - else: - break - if spot_wait_timeout <= time.time(): - module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime()) - instids = spot_req_inst_ids.values() - except boto.exception.BotoServerError, e: - module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message)) - - # The instances returned through run_instances can be in - # terminated state due to idempotency. - terminated_instances = [ str(instance.id) for instance in res.instances - if instance.state == 'terminated' ] - if terminated_instances: - module.fail_json(msg = "Instances with id(s) %s " % terminated_instances + - "were created previously but have since been terminated - " + - "use a (possibly different) 'instanceid' parameter") - - # wait here until the instances are up - num_running = 0 - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and num_running < len(instids): - try: - res_list = ec2.get_all_instances(instids) - except boto.exception.BotoServerError, e: - if e.error_code == 'InvalidInstanceID.NotFound': - time.sleep(1) - continue - else: - raise - - num_running = 0 - for res in res_list: - num_running += len([ i for i in res.instances if i.state=='running' ]) - if len(res_list) <= 0: - # got a bad response of some sort, possibly due to - # stale/cached data. 
Wait a second and then try again - time.sleep(1) - continue - if wait and num_running < len(instids): - time.sleep(5) - else: - break - - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime()) - - #We do this after the loop ends so that we end up with one list - for res in res_list: - running_instances.extend(res.instances) - - # Enabled by default by Amazon - if not source_dest_check: - for inst in res.instances: - inst.modify_attribute('sourceDestCheck', False) - - # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound - if instance_tags: - try: - ec2.create_tags(instids, instance_tags) - except boto.exception.EC2ResponseError, e: - module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, e.error_message)) - - instance_dict_array = [] - created_instance_ids = [] - for inst in running_instances: - d = get_instance_info(inst) - created_instance_ids.append(inst.id) - instance_dict_array.append(d) - - return (instance_dict_array, created_instance_ids, changed) - - -def terminate_instances(module, ec2, instance_ids): - """ - Terminates a list of instances - - module: Ansible module object - ec2: authenticated ec2 connection object - termination_list: a list of instances to terminate in the form of - [ {id: }, ..] - - Returns a dictionary of instance information - about the instances terminated. - - If the instance to be terminated is running - "changed" will be set to False. - - """ - - # Whether to wait for termination to complete before returning - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - changed = False - instance_dict_array = [] - - if not isinstance(instance_ids, list) or len(instance_ids) < 1: - module.fail_json(msg='instance_ids should be a list of instances, aborting') - - terminated_instance_ids = [] - for res in ec2.get_all_instances(instance_ids): - for inst in res.instances: - if inst.state == 'running' or inst.state == 'stopped': - terminated_instance_ids.append(inst.id) - instance_dict_array.append(get_instance_info(inst)) - try: - ec2.terminate_instances([inst.id]) - except EC2ResponseError, e: - module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e)) - changed = True - - # wait here until the instances are 'terminated' - if wait: - num_terminated = 0 - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids): - response = ec2.get_all_instances( \ - instance_ids=terminated_instance_ids, \ - filters={'instance-state-name':'terminated'}) - try: - num_terminated = len(response.pop().instances) - except Exception, e: - # got a bad response of some sort, possibly due to - # stale/cached data. Wait a second and then try again - time.sleep(1) - continue - - if num_terminated < len(terminated_instance_ids): - time.sleep(5) - - # waiting took too long - if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids): - module.fail_json(msg = "wait for instance termination timeout on %s" % time.asctime()) - - return (changed, instance_dict_array, terminated_instance_ids) - - -def startstop_instances(module, ec2, instance_ids, state): - """ - Starts or stops a list of existing instances - - module: Ansible module object - ec2: authenticated ec2 connection object - instance_ids: The list of instances to start in the form of - [ {id: }, ..] 
- state: Intended state ("running" or "stopped") - - Returns a dictionary of instance information - about the instances started/stopped. - - If the instance was not able to change state, - "changed" will be set to False. - - """ - - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - changed = False - instance_dict_array = [] - - if not isinstance(instance_ids, list) or len(instance_ids) < 1: - module.fail_json(msg='instance_ids should be a list of instances, aborting') - - # Check that our instances are not in the state we want to take them to - # and change them to our desired state - running_instances_array = [] - for res in ec2.get_all_instances(instance_ids): - for inst in res.instances: - if inst.state != state: - instance_dict_array.append(get_instance_info(inst)) - try: - if state == 'running': - inst.start() - else: - inst.stop() - except EC2ResponseError, e: - module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e)) - changed = True - - ## Wait for all the instances to finish starting or stopping - wait_timeout = time.time() + wait_timeout - while wait and wait_timeout > time.time(): - instance_dict_array = [] - matched_instances = [] - for res in ec2.get_all_instances(instance_ids): - for i in res.instances: - if i.state == state: - instance_dict_array.append(get_instance_info(i)) - matched_instances.append(i) - if len(matched_instances) < len(instance_ids): - time.sleep(5) - else: - break - - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime()) - - return (changed, instance_dict_array, instance_ids) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - key_name = dict(aliases = ['keypair']), - id = dict(), - group = dict(type='list'), - group_id = dict(type='list'), - zone = dict(aliases=['aws_zone', 'ec2_zone']), - instance_type = dict(aliases=['type']), - spot_price = dict(), - image = dict(), - kernel = dict(), - count = dict(type='int', default='1'), - monitoring = dict(type='bool', default=False), - ramdisk = dict(), - wait = dict(type='bool', default=False), - wait_timeout = dict(default=300), - spot_wait_timeout = dict(default=600), - placement_group = dict(), - user_data = dict(), - instance_tags = dict(type='dict'), - vpc_subnet_id = dict(), - assign_public_ip = dict(type='bool', default=False), - private_ip = dict(), - instance_profile_name = dict(), - instance_ids = dict(type='list'), - source_dest_check = dict(type='bool', default=True), - state = dict(default='present'), - exact_count = dict(type='int', default=None), - count_tag = dict(), - volumes = dict(type='list'), - ebs_optimized = dict(type='bool', default=False), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive = [ - ['exact_count', 'count'], - ['exact_count', 'state'], - ['exact_count', 'instance_ids'] - ], - ) - - ec2 = ec2_connect(module) - - tagged_instances = [] - - state = module.params.get('state') - - if state == 'absent': - instance_ids = module.params.get('instance_ids') - if not isinstance(instance_ids, list): - module.fail_json(msg='termination_list needs to be a list of instances to terminate') - - (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids) - - elif state in ('running', 'stopped'): - instance_ids = module.params.get('instance_ids') - if not isinstance(instance_ids, list): - 
module.fail_json(msg='running list needs to be a list of instances to run: %s' % instance_ids) - - (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state) - - elif state == 'present': - # Changed is always set to true when provisioning new instances - if not module.params.get('image'): - module.fail_json(msg='image parameter is required for new instance') - - if module.params.get('exact_count') is None: - (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2) - else: - (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2) - - module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_ami b/library/cloud/ec2_ami deleted file mode 100644 index 3baf70a438f..00000000000 --- a/library/cloud/ec2_ami +++ /dev/null @@ -1,273 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ec2_ami -version_added: "1.3" -short_description: create or destroy an image in ec2, return imageid -description: - - Creates or deletes ec2 images. This module has a dependency on python-boto >= 2.5 -options: - instance_id: - description: - - instance id of the image to create - required: false - default: null - aliases: [] - name: - description: - - The name of the new image to create - required: false - default: null - aliases: [] - wait: - description: - - wait for the AMI to be in state 'available' before returning. - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 - aliases: [] - state: - description: - - create or deregister/delete image - required: false - default: 'present' - aliases: [] - region: - description: - - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - default: null - aliases: [ 'aws_region', 'ec2_region' ] - description: - description: - - An optional human-readable string describing the contents and purpose of the AMI. - required: false - default: null - aliases: [] - no_reboot: - description: - - An optional flag indicating that the bundling process should not attempt to shutdown the instance before bundling. If this flag is True, the responsibility of maintaining file system integrity is left to the owner of the instance. The default choice is "no". - required: false - default: no - choices: [ "yes", "no" ] - aliases: [] - image_id: - description: - - Image ID to be deregistered. 
- required: false - default: null - aliases: [] - delete_snapshot: - description: - - Whether or not to delete the snapshot while deregistering the AMI. - required: false - default: null - aliases: [] - -author: Evan Duffield -extends_documentation_fragment: aws -''' - -# Thank you to iAcquire for sponsoring development of this module. -# -# See http://alestic.com/2011/06/ec2-ami-security for more information about ensuring the security of your AMI. - -EXAMPLES = ''' -# Basic AMI Creation -- local_action: - module: ec2_ami - aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx - aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - instance_id: i-xxxxxx - wait: yes - name: newtest - register: instance - -# Basic AMI Creation, without waiting -- local_action: - module: ec2_ami - aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx - aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - region: xxxxxx - instance_id: i-xxxxxx - wait: no - name: newtest - register: instance - -# Deregister/Delete AMI -- local_action: - module: ec2_ami - aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx - aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - region: xxxxxx - image_id: ${instance.image_id} - delete_snapshot: True - state: absent - -# Deregister AMI -- local_action: - module: ec2_ami - aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx - aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - region: xxxxxx - image_id: ${instance.image_id} - delete_snapshot: False - state: absent - -''' -import sys -import time - -try: - import boto - import boto.ec2 -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def create_image(module, ec2): - """ - Creates new AMI - - module : AnsibleModule object - ec2: authenticated ec2 connection object - """ - - instance_id = module.params.get('instance_id') - name = module.params.get('name') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - description = module.params.get('description') - no_reboot = module.params.get('no_reboot') - - try: - params = {'instance_id': instance_id, - 'name': name, - 'description': description, - 'no_reboot': no_reboot} - - image_id = ec2.create_image(**params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - # Wait until the image is recognized. EC2 API has eventual consistency, - # such that a successful CreateImage API call doesn't guarantee the success - # of a subsequent DescribeImages API call using the new image id returned. - for i in range(wait_timeout): - try: - img = ec2.get_image(image_id) - break - except boto.exception.EC2ResponseError, e: - if 'InvalidAMIID.NotFound' in e.error_code and wait: - time.sleep(1) - else: - module.fail_json(msg="Error while trying to find the new image. 
Using wait=yes and/or a longer wait_timeout may help.") - else: - module.fail_json(msg="timed out waiting for image to be recognized") - - # wait here until the image is created - wait_timeout = time.time() + wait_timeout - while wait and wait_timeout > time.time() and (img is None or img.state != 'available'): - img = ec2.get_image(image_id) - time.sleep(3) - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "timed out waiting for image to be created") - - module.exit_json(msg="AMI creation operation complete", image_id=image_id, state=img.state, changed=True) - - -def deregister_image(module, ec2): - """ - Deregisters AMI - """ - - image_id = module.params.get('image_id') - delete_snapshot = module.params.get('delete_snapshot') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - img = ec2.get_image(image_id) - if img == None: - module.fail_json(msg = "Image %s does not exist" % image_id, changed=False) - - try: - params = {'image_id': image_id, - 'delete_snapshot': delete_snapshot} - - res = ec2.deregister_image(**params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - # wait here until the image is gone - img = ec2.get_image(image_id) - wait_timeout = time.time() + wait_timeout - while wait and wait_timeout > time.time() and img is not None: - img = ec2.get_image(image_id) - time.sleep(3) - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "timed out waiting for image to be deregistered/deleted") - - module.exit_json(msg="AMI deregister/delete operation complete", changed=True) - sys.exit(0) - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - instance_id = dict(), - image_id = dict(), - delete_snapshot = dict(), - name = dict(), - wait = dict(type="bool", default=False), - wait_timeout = dict(default=900), - description = dict(default=""), - no_reboot = dict(default=False, type="bool"), - state = dict(default='present'), - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - ec2 = ec2_connect(module) - - if module.params.get('state') == 'absent': - if not module.params.get('image_id'): - module.fail_json(msg='image_id is required to deregister/delete an AMI') - - deregister_image(module, ec2) - - elif module.params.get('state') == 'present': - # Changed is always set to true when provisioning new AMI - if not module.params.get('instance_id'): - module.fail_json(msg='instance_id parameter is required for new image') - if not module.params.get('name'): - module.fail_json(msg='name parameter is required for new image') - create_image(module, ec2) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() - diff --git a/library/cloud/ec2_ami_search b/library/cloud/ec2_ami_search deleted file mode 100644 index 25875de39bd..00000000000 --- a/library/cloud/ec2_ami_search +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/python -# -# (c) 2013, Nimbis Services -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -DOCUMENTATION = ''' ---- -module: ec2_ami_search -short_description: Retrieve AWS AMI for a given operating system. -version_added: "1.6" -description: - - Look up the most recent AMI on AWS for a given operating system. - - Returns C(ami), C(aki), C(ari), C(serial), C(tag) - - If there is no AKI or ARI associated with an image, these will be C(null). - - Only supports images from cloud-images.ubuntu.com - - 'Example output: C({"ami": "ami-69f5a900", "changed": false, "aki": "aki-88aa75e1", "tag": "release", "ari": null, "serial": "20131024"})' -options: - distro: - description: Linux distribution (e.g., C(ubuntu)) - required: true - choices: ["ubuntu"] - release: - description: short name of the release (e.g., C(precise)) - required: true - stream: - description: Type of release. - required: false - default: "server" - choices: ["server", "desktop"] - store: - description: Back-end store for instance - required: false - default: "ebs" - choices: ["ebs", "ebs-io1", "ebs-ssd", "instance-store"] - arch: - description: CPU architecture - required: false - default: "amd64" - choices: ["i386", "amd64"] - region: - description: EC2 region - required: false - default: us-east-1 - choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2", - "eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2", "us-gov-west-1"] - virt: - description: virtualization type - required: false - default: paravirtual - choices: ["paravirtual", "hvm"] - -author: Lorin Hochstein -''' - -EXAMPLES = ''' -- name: Launch an Ubuntu 12.04 (Precise Pangolin) EC2 instance - hosts: 127.0.0.1 - connection: local - tasks: - - name: Get the Ubuntu precise AMI - ec2_ami_search: distro=ubuntu release=precise region=us-west-1 store=instance-store - register: ubuntu_image - - name: Start the EC2 instance - ec2: image={{ ubuntu_image.ami }} instance_type=m1.small key_name=mykey -''' - -import csv -import json -import urllib2 -import urlparse - -SUPPORTED_DISTROS = ['ubuntu'] - -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] - - -def get_url(module, url): - """ Get url and return response """ - try: - r = urllib2.urlopen(url) - except (urllib2.HTTPError, urllib2.URLError), e: - code = getattr(e, 'code', -1) - module.fail_json(msg="Request failed: %s" % str(e), status_code=code) - return r - - -def ubuntu(module): - """ Get the ami for ubuntu """ - - release = module.params['release'] - stream = module.params['stream'] - store = module.params['store'] - arch = module.params['arch'] - region = module.params['region'] - virt = module.params['virt'] - - url = get_ubuntu_url(release, stream) - - req = get_url(module, url) - reader = csv.reader(req, delimiter='\t') - try: - ami, aki, ari, tag, serial = lookup_ubuntu_ami(reader, release, stream, - store, arch, region, virt) - module.exit_json(changed=False, ami=ami, aki=aki, ari=ari, tag=tag, - serial=serial) - except KeyError: - module.fail_json(msg="No matching AMI found") - - -def lookup_ubuntu_ami(table, release, stream, store, arch, region, virt): - """ Look up the Ubuntu AMI that matches the query given a table of 
AMIs - - table: an iterable that returns a row of - (release, stream, tag, serial, store, arch, region, ami, aki, ari, virt) - release: ubuntu release name - stream: 'server' or 'desktop' - store: 'ebs', 'ebs-io1', 'ebs-ssd' or 'instance-store' - arch: 'i386' or 'amd64' - region: EC2 region - virt: 'paravirtual' or 'hvm' - - Returns (ami, aki, ari, tag, serial)""" - expected = (release, stream, store, arch, region, virt) - - for row in table: - (actual_release, actual_stream, tag, serial, - actual_store, actual_arch, actual_region, ami, aki, ari, - actual_virt) = row - actual = (actual_release, actual_stream, actual_store, actual_arch, - actual_region, actual_virt) - if actual == expected: - # aki and ari are sometimes blank - if aki == '': - aki = None - if ari == '': - ari = None - return (ami, aki, ari, tag, serial) - - raise KeyError() - - -def get_ubuntu_url(release, stream): - url = "https://cloud-images.ubuntu.com/query/%s/%s/released.current.txt" - return url % (release, stream) - - -def main(): - arg_spec = dict( - distro=dict(required=True, choices=SUPPORTED_DISTROS), - release=dict(required=True), - stream=dict(required=False, default='server', - choices=['desktop', 'server']), - store=dict(required=False, default='ebs', - choices=['ebs', 'ebs-io1', 'ebs-ssd', 'instance-store']), - arch=dict(required=False, default='amd64', - choices=['i386', 'amd64']), - region=dict(required=False, default='us-east-1', choices=AWS_REGIONS), - virt=dict(required=False, default='paravirtual', - choices=['paravirtual', 'hvm']) - ) - module = AnsibleModule(argument_spec=arg_spec) - distro = module.params['distro'] - - if distro == 'ubuntu': - ubuntu(module) - else: - module.fail_json(msg="Unsupported distro: %s" % distro) - - - -# this is magic, see lib/ansible/module_common.py -#<<INCLUDE_ANSIBLE_MODULE_COMMON>> - -if __name__ == '__main__': - main() diff --git a/library/cloud/ec2_asg b/library/cloud/ec2_asg deleted file mode 100755 index 3fc033e6d65..00000000000 --- a/library/cloud/ec2_asg +++ /dev/null @@ -1,608 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = """ ---- -module: ec2_asg -short_description: Create or delete AWS Autoscaling Groups -description: - - Can create or delete AWS Autoscaling Groups - - Works with the ec2_lc module to manage Launch Configurations -version_added: "1.6" -author: Gareth Rushgrove -options: - state: - description: - - Create or delete the group - required: true - choices: ['present', 'absent'] - name: - description: - - Unique name for group to be created or deleted - required: true - load_balancers: - description: - - List of ELB names to use for the group - required: false - availability_zones: - description: - - List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set. - required: false - launch_config_name: - description: - - Name of the Launch configuration to use for the group.
See the ec2_lc module for managing these. - required: false - min_size: - description: - - Minimum number of instances in group - required: false - max_size: - description: - - Maximum number of instances in group - required: false - desired_capacity: - description: - - Desired number of instances in group - required: false - replace_all_instances: - description: - - In a rolling fashion, replace all instances that have an old launch configuration with instances from the current launch configuration. - required: false - version_added: "1.8" - default: False - replace_batch_size: - description: - - Number of instances you'd like to replace at a time. Used with replace_all_instances. - required: false - version_added: "1.8" - default: 1 - replace_instances: - description: - - List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration. - required: false - version_added: "1.8" - default: None - lc_check: - description: - - Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config. - required: false - version_added: "1.8" - default: True - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - vpc_zone_identifier: - description: - - List of VPC subnets to use - required: false - default: None - tags: - description: - - A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true. - required: false - default: None - version_added: "1.7" - health_check_period: - description: - - Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. - required: false - default: 300 seconds - version_added: "1.7" - health_check_type: - description: - - The service you want the health status from, Amazon EC2 or Elastic Load Balancer. - required: false - default: EC2 - version_added: "1.7" - choices: ['EC2', 'ELB'] - wait_timeout: - description: - - How long to wait for instances to become viable when replaced. Used in conjunction with the replace_instances option. - default: 300 - version_added: "1.8" -extends_documentation_fragment: aws -""" - -EXAMPLES = ''' -A basic example of configuration: - -- ec2_asg: - name: special - load_balancers: 'lb1,lb2' - availability_zones: 'eu-west-1a,eu-west-1b' - launch_config_name: 'lc-1' - min_size: 1 - max_size: 10 - desired_capacity: 5 - vpc_zone_identifier: 'subnet-abcd1234,subnet-1a2b3c4d' - tags: - - environment: production - propagate_at_launch: no - -Below is an example of how to assign a new launch config to an ASG and terminate old instances. - -All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in -a rolling fashion with instances using the current launch configuration, "my_new_lc". - -This could also be considered a rolling deploy of a pre-baked AMI. - -If this is a newly created group, the instances will not be replaced since all instances -will have the current launch configuration.
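- -A sketch of the same rolling replace performed one instance at a time (a conservative variant using the hypothetical names above; replace_batch_size is documented in the options): - -- ec2_asg: - name: myasg - launch_config_name: my_new_lc - replace_all_instances: yes - replace_batch_size: 1 - min_size: 5 - max_size: 5 - desired_capacity: 5 - region: us-east-1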
- -- name: create launch config - ec2_lc: - name: my_new_lc - image_id: ami-lkajsf - key_name: mykey - region: us-east-1 - security_groups: sg-23423 - instance_type: m1.small - assign_public_ip: yes - -- ec2_asg: - name: myasg - launch_config_name: my_new_lc - health_check_period: 60 - health_check_type: ELB - replace_all_instances: yes - min_size: 5 - max_size: 5 - desired_capacity: 5 - region: us-east-1 - -To only replace a couple of instances instead of all of them, supply a list -to "replace_instances": - -- ec2_asg: - name: myasg - launch_config_name: my_new_lc - health_check_period: 60 - health_check_type: ELB - replace_instances: - - i-b345231 - - i-24c2931 - min_size: 5 - max_size: 5 - desired_capacity: 5 - region: us-east-1 -''' - -import sys -import time - -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -try: - import boto.ec2.autoscale - from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, Tag - from boto.exception import BotoServerError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity', - 'health_check_period', 'health_check_type', 'launch_config_name', - 'load_balancers', 'max_size', 'min_size', 'name', 'placement_group', - 'tags', 'termination_policies', 'vpc_zone_identifier') - -INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name') - -def enforce_required_arguments(module): - ''' As many arguments are not required for autoscale group deletion - they cannot be mandatory arguments for the module, so we enforce - them here ''' - missing_args = [] - for arg in ('min_size', 'max_size', 'launch_config_name'): - if module.params[arg] is None: - missing_args.append(arg) - if missing_args: - module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args)) - - -def get_properties(autoscaling_group): - properties = dict((attr, getattr(autoscaling_group, attr)) for attr in ASG_ATTRIBUTES) - properties['healthy_instances'] = 0 - properties['in_service_instances'] = 0 - properties['unhealthy_instances'] = 0 - properties['pending_instances'] = 0 - properties['viable_instances'] = 0 - properties['terminating_instances'] = 0 - - if autoscaling_group.instances: - properties['instances'] = [i.instance_id for i in autoscaling_group.instances] - instance_facts = {} - for i in autoscaling_group.instances: - instance_facts[i.instance_id] = {'health_status': i.health_status, - 'lifecycle_state': i.lifecycle_state, - 'launch_config_name': i.launch_config_name } - if i.health_status == 'Healthy' and i.lifecycle_state == 'InService': - properties['viable_instances'] += 1 - if i.health_status == 'Healthy': - properties['healthy_instances'] += 1 - else: - properties['unhealthy_instances'] += 1 - if i.lifecycle_state == 'InService': - properties['in_service_instances'] += 1 - if i.lifecycle_state == 'Terminating': - properties['terminating_instances'] += 1 - if i.lifecycle_state == 'Pending': - properties['pending_instances'] += 1 - properties['instance_facts'] = instance_facts - properties['load_balancers'] = autoscaling_group.load_balancers - return properties - - -def create_autoscaling_group(connection, module): - - group_name = module.params.get('name') - load_balancers = module.params['load_balancers'] - availability_zones = module.params['availability_zones'] - launch_config_name = module.params.get('launch_config_name') - min_size 
= module.params['min_size'] - max_size = module.params['max_size'] - desired_capacity = module.params.get('desired_capacity') - vpc_zone_identifier = module.params.get('vpc_zone_identifier') - set_tags = module.params.get('tags') - health_check_period = module.params.get('health_check_period') - health_check_type = module.params.get('health_check_type') - - as_groups = connection.get_all_groups(names=[group_name]) - - if not vpc_zone_identifier and not availability_zones: - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - try: - ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - - asg_tags = [] - for tag in set_tags: - if tag.has_key('key') and tag.has_key('value'): # this block is to support the deprecated form - asg_tags.append(Tag(key=tag.get('key'), - value=tag.get('value'), - propagate_at_launch=bool(tag.get('propagate_at_launch', True)), - resource_id=group_name)) - else: - for k,v in tag.iteritems(): - if k !='propagate_at_launch': - asg_tags.append(Tag(key=k, - value=v, - propagate_at_launch=bool(tag.get('propagate_at_launch', True)), - resource_id=group_name)) - - if not as_groups: - if not vpc_zone_identifier and not availability_zones: - availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()] - enforce_required_arguments(module) - launch_configs = connection.get_all_launch_configurations(names=[launch_config_name]) - ag = AutoScalingGroup( - group_name=group_name, - load_balancers=load_balancers, - availability_zones=availability_zones, - launch_config=launch_configs[0], - min_size=min_size, - max_size=max_size, - desired_capacity=desired_capacity, - vpc_zone_identifier=vpc_zone_identifier, - connection=connection, - tags=asg_tags, - health_check_period=health_check_period, - health_check_type=health_check_type) - - try: - connection.create_auto_scaling_group(ag) - asg_properties = get_properties(ag) - changed = True - return(changed, asg_properties) - except BotoServerError, e: - module.fail_json(msg=str(e)) - else: - as_group = as_groups[0] - changed = False - for attr in ASG_ATTRIBUTES: - if module.params.get(attr): - module_attr = module.params.get(attr) - group_attr = getattr(as_group, attr) - # we do this because AWS and the module may return the same list - # sorted differently - try: - module_attr.sort() - except: - pass - try: - group_attr.sort() - except: - pass - if group_attr != module_attr: - changed = True - setattr(as_group, attr, module_attr) - - if len(set_tags) > 0: - existing_tags = as_group.tags - existing_tag_map = dict((tag.key, tag) for tag in existing_tags) - for tag in set_tags: - if tag.has_key('key') and tag.has_key('value'): # this is to support the deprecated form - if ( not tag['key'] in existing_tag_map or - existing_tag_map[tag['key']].value != tag['value'] or - ('propagate_at_launch' in tag and - existing_tag_map[tag['key']].propagate_at_launch != tag['propagate_at_launch']) ): - changed = True - continue - else: - for k,v in tag.iteritems(): - if k !='propagate_at_launch': - if ( not k in existing_tag_map or - existing_tag_map[k].value != v or - ('propagate_at_launch' in tag and - existing_tag_map[k].propagate_at_launch != tag['propagate_at_launch']) ): - changed = True - continue - if changed: - connection.create_or_update_tags(asg_tags) - - # handle loadbalancers separately because None != [] - load_balancers =
module.params.get('load_balancers') or [] - if load_balancers and as_group.load_balancers != load_balancers: - changed = True - as_group.load_balancers = module.params.get('load_balancers') - - try: - if changed: - as_group.update() - asg_properties = get_properties(as_group) - return(changed, asg_properties) - except BotoServerError, e: - module.fail_json(msg=str(e)) - - - result = as_groups[0] - module.exit_json(changed=changed, name=result.name, - autoscaling_group_arn=result.autoscaling_group_arn, - availability_zones=result.availability_zones, - created_time=str(result.created_time), - default_cooldown=result.default_cooldown, - health_check_period=result.health_check_period, - health_check_type=result.health_check_type, - instance_id=result.instance_id, - instances=[instance.instance_id for instance in result.instances], - launch_config_name=result.launch_config_name, - load_balancers=result.load_balancers, - min_size=result.min_size, max_size=result.max_size, - placement_group=result.placement_group, - tags=result.tags, - termination_policies=result.termination_policies, - vpc_zone_identifier=result.vpc_zone_identifier) - - -def delete_autoscaling_group(connection, module): - group_name = module.params.get('name') - groups = connection.get_all_groups(names=[group_name]) - if groups: - group = groups[0] - group.max_size = 0 - group.min_size = 0 - group.desired_capacity = 0 - group.update() - instances = True - while instances: - tmp_groups = connection.get_all_groups(names=[group_name]) - if tmp_groups: - tmp_group = tmp_groups[0] - if not tmp_group.instances: - instances = False - time.sleep(10) - - group.delete() - changed=True - return changed - else: - changed=False - return changed - -def get_chunks(l, n): - for i in xrange(0, len(l), n): - yield l[i:i+n] - -def replace(connection, module): - - batch_size = module.params.get('replace_batch_size') - wait_timeout = module.params.get('wait_timeout') - group_name = module.params.get('name') - max_size = module.params.get('max_size') - min_size = module.params.get('min_size') - desired_capacity = module.params.get('desired_capacity') - replace_instances = module.params.get('replace_instances') - - - # wait for instance list to be populated on a newly provisioned ASG - instance_wait = time.time() + 30 - while instance_wait > time.time(): - as_group = connection.get_all_groups(names=[group_name])[0] - props = get_properties(as_group) - if props.has_key('instances'): - instances = props['instances'] - break - time.sleep(10) - if instance_wait <= time.time(): - # waiting took too long - module.fail_json(msg = "Waited too long for instances to appear. %s" % time.asctime()) - # determine if we need to continue - replaceable = 0 - if replace_instances: - instances = replace_instances - for k in props['instance_facts'].keys(): - if k in instances: - if props['instance_facts'][k]['launch_config_name'] != props['launch_config_name']: - replaceable += 1 - if replaceable == 0: - changed = False - return(changed, props) - - # set temporary settings and wait for them to be reached - as_group.max_size = max_size + batch_size - as_group.min_size = min_size + batch_size - as_group.desired_capacity = desired_capacity + batch_size - as_group.update() - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and min_size + batch_size > props['viable_instances']: - time.sleep(10) - as_groups = connection.get_all_groups(names=[group_name]) - as_group = as_groups[0] - props = get_properties(as_group) - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "Waited too long for instances to become viable. %s" % time.asctime()) - instances = props['instances'] - if replace_instances: - instances = replace_instances - for i in get_chunks(instances, batch_size): - replace_batch(connection, module, i) - # return settings to normal - as_group = connection.get_all_groups(names=[group_name])[0] - as_group.max_size = max_size - as_group.min_size = min_size - as_group.desired_capacity = desired_capacity - as_group.update() - as_group = connection.get_all_groups(names=[group_name])[0] - asg_properties = get_properties(as_group) - changed=True - return(changed, asg_properties) - -def replace_batch(connection, module, replace_instances): - - - group_name = module.params.get('name') - wait_timeout = int(module.params.get('wait_timeout')) - lc_check = module.params.get('lc_check') - - as_group = connection.get_all_groups(names=[group_name])[0] - props = get_properties(as_group) - - # check to make sure instances given are actually in the given ASG - # and they have a non-current launch config - old_instances = [] - instances = ( inst_id for inst_id in replace_instances if inst_id in props['instances']) - - if lc_check: - for i in instances: - if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']: - old_instances.append(i) - else: - old_instances = instances - - # set all instances given to unhealthy - for instance_id in old_instances: - connection.set_instance_health(instance_id,'Unhealthy') - - # we wait to make sure the machines we marked as Unhealthy are - # no longer in the list - - count = 1 - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and count > 0: - count = 0 - as_group = connection.get_all_groups(names=[group_name])[0] - props = get_properties(as_group) - instance_facts = props['instance_facts'] - instances = ( i for i in instance_facts if i in old_instances) - for i in instances: - if ( instance_facts[i]['lifecycle_state'] == 'Terminating' - or instance_facts[i]['health_status'] == 'Unhealthy' ): - count += 1 - time.sleep(10) - - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime()) - - # make sure we have the latest stats after that last loop.
- as_group = connection.get_all_groups(names=[group_name])[0] - props = get_properties(as_group) - - # now we make sure that we have enough instances in a viable state - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and props['min_size'] > props['viable_instances']: - time.sleep(10) - as_groups = connection.get_all_groups(names=[group_name]) - as_group = as_groups[0] - props = get_properties(as_group) - - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime()) - - # collect final stats info - as_group = connection.get_all_groups(names=[group_name])[0] - asg_properties = get_properties(as_group) - - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name=dict(required=True, type='str'), - load_balancers=dict(type='list'), - availability_zones=dict(type='list'), - launch_config_name=dict(type='str'), - min_size=dict(type='int'), - max_size=dict(type='int'), - desired_capacity=dict(type='int'), - vpc_zone_identifier=dict(type='str'), - replace_batch_size=dict(type='int', default=1), - replace_all_instances=dict(type='bool', default=False), - replace_instances=dict(type='list', default=[]), - lc_check=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=300), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='list', default=[]), - health_check_period=dict(type='int', default=300), - health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - state = module.params.get('state') - replace_instances = module.params.get('replace_instances') - replace_all_instances = module.params.get('replace_all_instances') - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - try: - connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - if not connection: - module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region)) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - changed = create_changed = replace_changed = False - if replace_all_instances and replace_instances: - module.fail_json(msg="You can't use replace_instances and replace_all_instances in the same task.") - if state == 'present': - create_changed, asg_properties=create_autoscaling_group(connection, module) - if replace_all_instances or replace_instances: - replace_changed, asg_properties=replace(connection, module) - elif state == 'absent': - changed = delete_autoscaling_group(connection, module) - module.exit_json( changed = changed ) - if create_changed or replace_changed: - changed = True - module.exit_json( changed = changed, **asg_properties ) - -main() diff --git a/library/cloud/ec2_eip b/library/cloud/ec2_eip deleted file mode 100644 index cff83e482b3..00000000000 --- a/library/cloud/ec2_eip +++ /dev/null @@ -1,305 +0,0 @@ -#!/usr/bin/python -DOCUMENTATION = ''' ---- -module: ec2_eip -short_description: associate an EC2 elastic IP with an instance. -description: - - This module associates AWS EC2 elastic IP addresses with instances -version_added: 1.4 -options: - instance_id: - description: - - The EC2 instance id - required: false - public_ip: - description: - - The elastic IP address to associate with the instance. - - If absent, allocate a new address - required: false - state: - description: - - If present, associate the IP with the instance.
- If absent, disassociate the IP from the instance. - required: false - choices: ['present', 'absent'] - default: present - region: - description: - - the EC2 region to use - required: false - default: null - aliases: [ ec2_region ] - in_vpc: - description: - - allocate an EIP inside a VPC or not - required: false - default: false - version_added: "1.4" - reuse_existing_ip_allowed: - description: - - Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one. - required: false - default: false - version_added: "1.6" - wait_timeout: - description: - - how long to wait in seconds for newly provisioned EIPs to become available - default: 300 - version_added: "1.7" - -extends_documentation_fragment: aws -author: Lorin Hochstein -notes: - - This module will return C(public_ip) on success, which will contain the - public IP address associated with the instance. - - There may be a delay between the time the Elastic IP is assigned and when - the cloud instance is reachable via the new address. Use wait_for and pause - to delay further playbook execution until the instance is reachable, if - necessary. -''' - -EXAMPLES = ''' -- name: associate an elastic IP with an instance - ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 - -- name: disassociate an elastic IP from an instance - ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 state=absent - -- name: allocate a new elastic IP and associate it with an instance - ec2_eip: instance_id=i-1212f003 - -- name: allocate a new elastic IP without associating it to anything - ec2_eip: - register: eip -- name: output the IP - debug: msg="Allocated IP is {{ eip.public_ip }}" - -- name: provision new instances with ec2 - ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes group=webserver count=3 - register: ec2 -- name: associate new elastic IPs with each of the instances - ec2_eip: "instance_id={{ item }}" - with_items: ec2.instance_ids - -- name: allocate a new elastic IP inside a VPC in us-west-2 - ec2_eip: region=us-west-2 in_vpc=yes - register: eip -- name: output the IP - debug: msg="Allocated IP inside a VPC is {{ eip.public_ip }}" -''' - -try: - import boto.ec2 -except ImportError: - boto_found = False -else: - boto_found = True - - -wait_timeout = 0 - -def associate_ip_and_instance(ec2, address, instance_id, module): - if ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module): - module.exit_json(changed=False, public_ip=address.public_ip) - - # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(changed=True) - - try: - if address.domain == "vpc": - res = ec2.associate_address(instance_id, allocation_id=address.allocation_id) - else: - res = ec2.associate_address(instance_id, public_ip=address.public_ip) - except boto.exception.EC2ResponseError, e: - module.fail_json(msg=str(e)) - - if res: - module.exit_json(changed=True, public_ip=address.public_ip) - else: - module.fail_json(msg="association failed") - - -def disassociate_ip_and_instance(ec2, address, instance_id, module): - if not ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module): - module.exit_json(changed=False, public_ip=address.public_ip) - - # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(changed=True) - - try: - if address.domain == "vpc": - res = ec2.disassociate_address(association_id=address.association_id) - else: - res = ec2.disassociate_address(public_ip=address.public_ip) - except
boto.exception.EC2ResponseError, e: - module.fail_json(msg=str(e)) - - if res: - module.exit_json(changed=True) - else: - module.fail_json(msg="disassociation failed") - - -def find_address(ec2, public_ip, module): - """ Find an existing Elastic IP address """ - if wait_timeout != 0: - timeout = time.time() + wait_timeout - while timeout > time.time(): - try: - addresses = ec2.get_all_addresses([public_ip]) - break - except boto.exception.EC2ResponseError, e: - if "Address '%s' not found." % public_ip in e.message : - pass - else: - module.fail_json(msg=str(e.message)) - time.sleep(5) - - if timeout <= time.time(): - module.fail_json(msg = "wait for EIPs timeout on %s" % time.asctime()) - else: - try: - addresses = ec2.get_all_addresses([public_ip]) - except boto.exception.EC2ResponseError, e: - module.fail_json(msg=str(e.message)) - - return addresses[0] - - -def ip_is_associated_with_instance(ec2, public_ip, instance_id, module): - """ Check if the elastic IP is currently associated with the instance """ - address = find_address(ec2, public_ip, module) - if address: - return address.instance_id == instance_id - else: - return False - - -def allocate_address(ec2, domain, module, reuse_existing_ip_allowed): - """ Allocate a new elastic IP address (when needed) and return it """ - # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(changed=True) - - if reuse_existing_ip_allowed: - if domain: - domain_filter = { 'domain' : domain } - else: - domain_filter = { 'domain' : 'standard' } - all_addresses = ec2.get_all_addresses(filters=domain_filter) - - unassociated_addresses = filter(lambda a: a.instance_id == "", all_addresses) - if unassociated_addresses: - address = unassociated_addresses[0] - else: - address = ec2.allocate_address(domain=domain) - else: - address = ec2.allocate_address(domain=domain) - - return address - - -def release_address(ec2, public_ip, module): - """ Release a previously allocated elastic IP address """ - - address = find_address(ec2, public_ip, module) - - # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(changed=True) - - res = address.release() - if res: - module.exit_json(changed=True) - else: - module.fail_json(msg="release failed") - - -def find_instance(ec2, instance_id, module): - """ Attempt to find the EC2 instance and return it """ - - try: - reservations = ec2.get_all_reservations(instance_ids=[instance_id]) - except boto.exception.EC2ResponseError, e: - module.fail_json(msg=str(e)) - - if len(reservations) == 1: - instances = reservations[0].instances - if len(instances) == 1: - return instances[0] - - module.fail_json(msg="could not find instance " + instance_id) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - instance_id = dict(required=False), - public_ip = dict(required=False, aliases= ['ip']), - state = dict(required=False, default='present', - choices=['present', 'absent']), - in_vpc = dict(required=False, type='bool', default=False), - reuse_existing_ip_allowed = dict(required=False, type='bool', default=False), - wait_timeout = dict(default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - if not boto_found: - module.fail_json(msg="boto is required") - - ec2 = ec2_connect(module) - - instance_id = module.params.get('instance_id') - public_ip = module.params.get('public_ip') - state = module.params.get('state') - in_vpc = module.params.get('in_vpc') - domain = "vpc" if in_vpc else
None - reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed') - new_eip_timeout = int(module.params.get('wait_timeout')) - - if state == 'present': - # Allocate an EIP and exit - if not instance_id and not public_ip: - address = allocate_address(ec2, domain, module, reuse_existing_ip_allowed) - module.exit_json(changed=True, public_ip=address.public_ip) - - # Return the EIP object since we've been given a public IP - if public_ip: - address = find_address(ec2, public_ip, module) - - # Allocate an IP for instance since no public_ip was provided - if instance_id and not public_ip: - instance = find_instance(ec2, instance_id, module) - if instance.vpc_id: - domain = "vpc" - address = allocate_address(ec2, domain, module, reuse_existing_ip_allowed) - # overriding the timeout since this is a newly provisioned ip - global wait_timeout - wait_timeout = new_eip_timeout - - # Associate address object (provided or allocated) with instance - associate_ip_and_instance(ec2, address, instance_id, module) - - else: - # disassociating address from instance - if instance_id: - address = find_address(ec2, public_ip, module) - disassociate_ip_and_instance(ec2, address, instance_id, module) - # releasing address - else: - release_address(ec2, public_ip, module) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -if __name__ == '__main__': - main() diff --git a/library/cloud/ec2_elb b/library/cloud/ec2_elb deleted file mode 100644 index 42cb1819025..00000000000 --- a/library/cloud/ec2_elb +++ /dev/null @@ -1,339 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = """ ---- -module: ec2_elb -short_description: De-registers or registers instances from EC2 ELBs -description: - - This module de-registers or registers an AWS EC2 instance from the ELBs - that it belongs to. - - Returns fact "ec2_elbs" which is a list of elbs attached to the instance - if state=absent is passed as an argument. - - Will be marked changed when called only if there are ELBs found to operate on. -version_added: "1.2" -author: John Jarvis -options: - state: - description: - - register or deregister the instance - required: true - choices: ['present', 'absent'] - instance_id: - description: - - EC2 Instance ID - required: true - ec2_elbs: - description: - - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register. - required: false - default: None - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - enable_availability_zone: - description: - - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already - been enabled.
If set to no, the task will fail if the availability zone is not enabled on the ELB. - required: false - default: yes - choices: [ "yes", "no" ] - wait: - description: - - Wait for instance registration or deregistration to complete successfully before returning. - required: false - default: yes - choices: [ "yes", "no" ] - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. - required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - wait_timeout: - description: - - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no. - required: false - default: 0 - version_added: "1.6" -extends_documentation_fragment: aws -""" - -EXAMPLES = """ -# basic pre_task and post_task example -pre_tasks: - - name: Gathering ec2 facts - ec2_facts: - - name: Instance De-register - local_action: ec2_elb - args: - instance_id: "{{ ansible_ec2_instance_id }}" - state: 'absent' -roles: - - myrole -post_tasks: - - name: Instance Register - local_action: ec2_elb - args: - instance_id: "{{ ansible_ec2_instance_id }}" - ec2_elbs: "{{ item }}" - state: 'present' - with_items: ec2_elbs -""" - -import time -import sys -import os - -try: - import boto - import boto.ec2 - import boto.ec2.elb - from boto.regioninfo import RegionInfo -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -class ElbManager: - """Handles EC2 instance ELB registration and de-registration""" - - def __init__(self, module, instance_id=None, ec2_elbs=None, - region=None, **aws_connect_params): - self.module = module - self.instance_id = instance_id - self.region = region - self.aws_connect_params = aws_connect_params - self.lbs = self._get_instance_lbs(ec2_elbs) - self.changed = False - - def deregister(self, wait, timeout): - """De-register the instance from all ELBs and wait for the ELB - to report it out-of-service""" - - for lb in self.lbs: - initial_state = self._get_instance_health(lb) - if initial_state is None: - # The instance isn't registered with this ELB so just - # return unchanged - return - - lb.deregister_instances([self.instance_id]) - - # The ELB is changing state in some way. Either an instance that's - # InService is moving to OutOfService, or an instance that's - # already OutOfService is being deregistered. - self.changed = True - - if wait: - self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout) - - def register(self, wait, enable_availability_zone, timeout): - """Register the instance for all ELBs and wait for the ELB - to report the instance in-service""" - for lb in self.lbs: - initial_state = self._get_instance_health(lb) - - if enable_availability_zone: - self._enable_availailability_zone(lb) - - lb.register_instances([self.instance_id]) - - if wait: - self._await_elb_instance_state(lb, 'InService', initial_state, timeout) - else: - # We cannot assume no change was made if we don't wait - # to find out - self.changed = True - - def exists(self, lbtest): - """ Verify that the named ELB actually exists """ - - found = False - for lb in self.lbs: - if lb.name == lbtest: - found=True - break - return found - - def _enable_availailability_zone(self, lb): - """Enable the current instance's availability zone in the provided lb. 
- Returns True if the zone was enabled or False if no change was made. - lb: load balancer""" - instance = self._get_instance() - if instance.placement in lb.availability_zones: - return False - - lb.enable_zones(zones=instance.placement) - - # If successful, the new zone will have been added to - # lb.availability_zones - return instance.placement in lb.availability_zones - - def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout): - """Wait for an ELB to change state - lb: load balancer - awaited_state : state to poll for (string)""" - - wait_timeout = time.time() + timeout - while True: - instance_state = self._get_instance_health(lb) - - if not instance_state: - msg = ("The instance %s could not be put in service on %s." - " Reason: Invalid Instance") - self.module.fail_json(msg=msg % (self.instance_id, lb)) - - if instance_state.state == awaited_state: - # Check the current state against the initial state, and only set - # changed if they are different. - if (initial_state is None) or (instance_state.state != initial_state.state): - self.changed = True - break - elif self._is_instance_state_pending(instance_state): - # If it's pending, we'll skip further checks and continue waiting - pass - elif (awaited_state == 'InService' - and instance_state.reason_code == "Instance" - and time.time() >= wait_timeout): - # If the reason_code for the instance being out of service is - # "Instance" this indicates a failure state, e.g. the instance - # has failed a health check or the ELB does not have the - # instance's availability zone enabled. The exact reason why is - # described in InstanceState.description. - msg = ("The instance %s could not be put in service on %s." - " Reason: %s") - self.module.fail_json(msg=msg % (self.instance_id, - lb, - instance_state.description)) - time.sleep(1) - - def _is_instance_state_pending(self, instance_state): - """ - Determines whether the instance_state is "pending", meaning there is - an operation under way to bring it in service. - """ - # This is messy, because AWS provides no way to distinguish between - # an instance that is OutOfService because it's pending vs. OutOfService - # because it's failing health checks. So we're forced to analyze the - # description, which is likely to be brittle. - return (instance_state and 'pending' in instance_state.description) - - def _get_instance_health(self, lb): - """ - Check instance health, should return status object or None under - certain error conditions.
- """ - try: - status = lb.get_instance_health([self.instance_id])[0] - except boto.exception.BotoServerError, e: - if e.error_code == 'InvalidInstance': - return None - else: - raise - return status - - def _get_instance_lbs(self, ec2_elbs=None): - """Returns a list of ELBs attached to self.instance_id - ec2_elbs: an optional list of elb names that will be used - for elb lookup instead of returning what elbs - are attached to self.instance_id""" - - try: - elb = connect_to_aws(boto.ec2.elb, self.region, - **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: - self.module.fail_json(msg=str(e)) - - elbs = elb.get_all_load_balancers() - - if ec2_elbs: - lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs) - else: - lbs = [] - for lb in elbs: - for info in lb.instances: - if self.instance_id == info.id: - lbs.append(lb) - return lbs - - def _get_instance(self): - """Returns a boto.ec2.InstanceObject for self.instance_id""" - try: - ec2 = connect_to_aws(boto.ec2, self.region, - **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: - self.module.fail_json(msg=str(e)) - return ec2.get_only_instances(instance_ids=[self.instance_id])[0] - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - state={'required': True}, - instance_id={'required': True}, - ec2_elbs={'default': None, 'required': False, 'type':'list'}, - enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, - wait={'required': False, 'default': True, 'type': 'bool'}, - wait_timeout={'requred': False, 'default': 0, 'type': 'int'} - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - - if not region: - module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") - - ec2_elbs = module.params['ec2_elbs'] - wait = module.params['wait'] - enable_availability_zone = module.params['enable_availability_zone'] - timeout = module.params['wait_timeout'] - - if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: - module.fail_json(msg="ELBs are required for registration") - - instance_id = module.params['instance_id'] - elb_man = ElbManager(module, instance_id, ec2_elbs, - region=region, **aws_connect_params) - - if ec2_elbs is not None: - for elb in ec2_elbs: - if not elb_man.exists(elb): - msg="ELB %s does not exist" % elb - module.fail_json(msg=msg) - - if module.params['state'] == 'present': - elb_man.register(wait, enable_availability_zone, timeout) - elif module.params['state'] == 'absent': - elb_man.deregister(wait, timeout) - - ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]} - ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts) - - module.exit_json(**ec2_facts_result) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_elb_lb b/library/cloud/ec2_elb_lb deleted file mode 100644 index f15de268924..00000000000 --- a/library/cloud/ec2_elb_lb +++ /dev/null @@ -1,698 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = """ ---- -module: ec2_elb_lb -short_description: Creates or destroys Amazon ELB. -description: - - Creates or destroys Amazon ELB. - - Returns information about the load balancer. - - Will be marked changed when called only if state is changed. -version_added: "1.5" -author: Jim Dalton -options: - state: - description: - - Create or destroy the ELB - required: true - name: - description: - - The name of the ELB - required: true - listeners: - description: - - List of ports/protocols for this ELB to listen on (see example) - required: false - purge_listeners: - description: - - Purge existing listeners on ELB that are not found in listeners - required: false - default: true - zones: - description: - - List of availability zones to enable on this ELB - required: false - purge_zones: - description: - - Purge existing availability zones on ELB that are not found in zones - required: false - default: false - security_group_ids: - description: - - A list of security groups to apply to the elb - required: false - default: None - version_added: "1.6" - health_check: - description: - - An associative array of health check configuration settings (see example) - required: false - default: None - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - subnets: - description: - - A list of VPC subnets to use when creating ELB. Zones should be empty if using this. - required: false - default: None - aliases: [] - version_added: "1.7" - purge_subnets: - description: - - Purge existing subnets on ELB that are not found in subnets - required: false - default: false - version_added: "1.7" - scheme: - description: - - The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'. - required: false - default: 'internet-facing' - version_added: "1.7" - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. - required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - connection_draining_timeout: - description: - - Wait a specified timeout allowing connections to drain before terminating an instance - required: false - aliases: [] - version_added: "1.8" - cross_az_load_balancing: - description: - - Distribute load across all configured Availability Zones - required: false - default: "no" - choices: ["yes", "no"] - aliases: [] - version_added: "1.8" - -extends_documentation_fragment: aws -""" - -EXAMPLES = """ -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set.
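- -# For reference, a sketch of the module's own logic (mirroring the -# _get_health_check_target method further down, not an additional option): -# the health check target is composed as PROTOCOL:PORT plus ping_path for -# HTTP/HTTPS, so the health check example below resolves to "HTTP:80/index.html". -# -# protocol = health_check['ping_protocol'].upper() # e.g. 'HTTP' -# path = health_check['ping_path'] if protocol in ('HTTP', 'HTTPS') else '' -# target = '%s:%s%s' % (protocol, health_check['ping_port'], path)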
- -# Basic provisioning example -- local_action: - module: ec2_elb_lb - name: "test-please-delete" - state: present - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http # options are http, https, ssl, tcp - load_balancer_port: 80 - instance_port: 80 - - protocol: https - load_balancer_port: 443 - instance_protocol: http # optional, defaults to value of protocol setting - instance_port: 80 - # ssl certificate required for https or ssl - ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert" - - -# Basic VPC provisioning example -- local_action: - module: ec2_elb_lb - name: "test-vpc" - scheme: internal - state: present - subnets: - - subnet-abcd1234 - - subnet-1a2b3c4d - listeners: - - protocol: http # options are http, https, ssl, tcp - load_balancer_port: 80 - instance_port: 80 - -# Configure a health check -- local_action: - module: ec2_elb_lb - name: "test-please-delete" - state: present - zones: - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - health_check: - ping_protocol: http # options are http, https, ssl, tcp - ping_port: 80 - ping_path: "/index.html" # not required for tcp or ssl - response_timeout: 5 # seconds - interval: 30 # seconds - unhealthy_threshold: 2 - healthy_threshold: 10 - -# Ensure ELB is gone -- local_action: - module: ec2_elb_lb - name: "test-please-delete" - state: absent - -# Normally, this module will purge any listeners that exist on the ELB -# but aren't specified in the listeners parameter. If purge_listeners is -# false it leaves them alone -- local_action: - module: ec2_elb_lb - name: "test-please-delete" - state: present - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - purge_listeners: no - -# Normally, this module will leave availability zones that are enabled -# on the ELB alone. If purge_zones is true, then any extraneous zones -# will be removed -- local_action: - module: ec2_elb_lb - name: "test-please-delete" - state: present - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - purge_zones: yes - -# Creates an ELB and assigns a list of subnets to it.
-- local_action: - module: ec2_elb_lb - state: present - name: 'New ELB' - security_group_ids: 'sg-123456, sg-67890' - region: us-west-2 - subnets: 'subnet-123456, subnet-67890' - purge_subnets: yes - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - -# Create an ELB with connection draining and cross availability -# zone load balancing -- local_action: - module: ec2_elb_lb - name: "New ELB" - state: present - connection_draining_timeout: 60 - cross_az_load_balancing: "yes" - region: us-east-1 - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 -""" - -import sys -import os - -try: - import boto - import boto.ec2.elb - import boto.ec2.elb.attributes - from boto.ec2.elb.healthcheck import HealthCheck - from boto.regioninfo import RegionInfo -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -class ElbManager(object): - """Handles ELB creation and destruction""" - - def __init__(self, module, name, listeners=None, purge_listeners=None, - zones=None, purge_zones=None, security_group_ids=None, - health_check=None, subnets=None, purge_subnets=None, - scheme="internet-facing", connection_draining_timeout=None, - cross_az_load_balancing=None, region=None, **aws_connect_params): - - self.module = module - self.name = name - self.listeners = listeners - self.purge_listeners = purge_listeners - self.zones = zones - self.purge_zones = purge_zones - self.security_group_ids = security_group_ids - self.health_check = health_check - self.subnets = subnets - self.purge_subnets = purge_subnets - self.scheme = scheme - self.connection_draining_timeout = connection_draining_timeout - self.cross_az_load_balancing = cross_az_load_balancing - - self.aws_connect_params = aws_connect_params - self.region = region - - self.changed = False - self.status = 'gone' - self.elb_conn = self._get_elb_connection() - self.elb = self._get_elb() - - def ensure_ok(self): - """Create the ELB""" - if not self.elb: - # Zones and listeners will be added at creation - self._create_elb() - else: - self._set_zones() - self._set_security_groups() - self._set_elb_listeners() - self._set_subnets() - self._set_health_check() - # boto has introduced support for some ELB attributes in - # different versions, so we check first before trying to - # set them to avoid errors - if self._check_attribute_support('connection_draining'): - self._set_connection_draining_timeout() - if self._check_attribute_support('cross_zone_load_balancing'): - self._set_cross_az_load_balancing() - - def ensure_gone(self): - """Destroy the ELB""" - if self.elb: - self._delete_elb() - - def get_info(self): - try: - check_elb = self.elb_conn.get_all_load_balancers(self.name)[0] - except: - check_elb = None - - if not check_elb: - info = { - 'name': self.name, - 'status': self.status - } - else: - info = { - 'name': check_elb.name, - 'dns_name': check_elb.dns_name, - 'zones': check_elb.availability_zones, - 'security_group_ids': check_elb.security_groups, - 'status': self.status, - 'subnets': self.subnets, - 'scheme': check_elb.scheme - } - - if check_elb.health_check: - info['health_check'] = { - 'target': check_elb.health_check.target, - 'interval': check_elb.health_check.interval, - 'timeout': check_elb.health_check.timeout, - 'healthy_threshold': check_elb.health_check.healthy_threshold, - 'unhealthy_threshold': check_elb.health_check.unhealthy_threshold, - } - - if check_elb.listeners: - info['listeners'] =
[l.get_complex_tuple() - for l in check_elb.listeners] - elif self.status == 'created': - # When creating a new ELB, listeners don't show in the - # immediately returned result, so just include the - # ones that were added - info['listeners'] = [self._listener_as_tuple(l) - for l in self.listeners] - else: - info['listeners'] = [] - - if self._check_attribute_support('connection_draining'): - info['connection_draining_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout - - if self._check_attribute_support('cross_zone_load_balancing'): - is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing') - if is_cross_az_lb_enabled: - info['cross_az_load_balancing'] = 'yes' - else: - info['cross_az_load_balancing'] = 'no' - - return info - - def _get_elb(self): - elbs = self.elb_conn.get_all_load_balancers() - for elb in elbs: - if self.name == elb.name: - self.status = 'ok' - return elb - - def _get_elb_connection(self): - try: - return connect_to_aws(boto.ec2.elb, self.region, - **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: - self.module.fail_json(msg=str(e)) - - def _delete_elb(self): - # True if succeeds, exception raised if not - result = self.elb_conn.delete_load_balancer(name=self.name) - if result: - self.changed = True - self.status = 'deleted' - - def _create_elb(self): - listeners = [self._listener_as_tuple(l) for l in self.listeners] - self.elb = self.elb_conn.create_load_balancer(name=self.name, - zones=self.zones, - security_groups=self.security_group_ids, - complex_listeners=listeners, - subnets=self.subnets, - scheme=self.scheme) - if self.elb: - self.changed = True - self.status = 'created' - - def _create_elb_listeners(self, listeners): - """Takes a list of listener tuples and creates them""" - # True if succeeds, exception raised if not - self.changed = self.elb_conn.create_load_balancer_listeners(self.name, - complex_listeners=listeners) - - def _delete_elb_listeners(self, listeners): - """Takes a list of listener tuples and deletes them from the elb""" - ports = [l[0] for l in listeners] - - # True if succeeds, exception raised if not - self.changed = self.elb_conn.delete_load_balancer_listeners(self.name, - ports) - - def _set_elb_listeners(self): - """ - Creates listeners specified by self.listeners; overwrites existing - listeners on these ports; removes extraneous listeners - """ - listeners_to_add = [] - listeners_to_remove = [] - listeners_to_keep = [] - - # Check for any listeners we need to create or overwrite - for listener in self.listeners: - listener_as_tuple = self._listener_as_tuple(listener) - - # First we loop through existing listeners to see if one is - # already specified for this port - existing_listener_found = None - for existing_listener in self.elb.listeners: - # Since ELB allows only one listener on each incoming port, a - # single match on the incoming port is all we're looking for - if existing_listener[0] == listener['load_balancer_port']: - existing_listener_found = existing_listener.get_complex_tuple() - break - - if existing_listener_found: - # Does it match exactly?
- if listener_as_tuple != existing_listener_found: - # The ports are the same but something else is different, - # so we'll remove the existing one and add the new one - listeners_to_remove.append(existing_listener_found) - listeners_to_add.append(listener_as_tuple) - else: - # We already have this listener, so we're going to keep it - listeners_to_keep.append(existing_listener_found) - else: - # We didn't find an existing listener, so just add the new one - listeners_to_add.append(listener_as_tuple) - - # Check for any extraneous listeners we need to remove, if desired - if self.purge_listeners: - for existing_listener in self.elb.listeners: - existing_listener_tuple = existing_listener.get_complex_tuple() - if existing_listener_tuple in listeners_to_remove: - # Already queued for removal - continue - if existing_listener_tuple in listeners_to_keep: - # Keep this one around - continue - # Since we're not already removing it and we don't need to keep - # it, let's get rid of it - listeners_to_remove.append(existing_listener_tuple) - - if listeners_to_remove: - self._delete_elb_listeners(listeners_to_remove) - - if listeners_to_add: - self._create_elb_listeners(listeners_to_add) - - def _listener_as_tuple(self, listener): - """Formats listener as a 4- or 5-tuple, in the order specified by the - ELB API""" - # N.B. string manipulations on protocols below (str(), upper()) are to - # ensure format matches output from ELB API - listener_list = [ - listener['load_balancer_port'], - listener['instance_port'], - str(listener['protocol'].upper()), - ] - - # Instance protocol is not required by ELB API; it defaults to match - # load balancer protocol. We'll mimic that behavior here - if 'instance_protocol' in listener: - listener_list.append(str(listener['instance_protocol'].upper())) - else: - listener_list.append(str(listener['protocol'].upper())) - - if 'ssl_certificate_id' in listener: - listener_list.append(str(listener['ssl_certificate_id'])) - - return tuple(listener_list) - - def _enable_zones(self, zones): - try: - self.elb.enable_zones(zones) - except boto.exception.BotoServerError, e: - if "Invalid Availability Zone" in e.error_message: - self.module.fail_json(msg=e.error_message) - else: - self.module.fail_json(msg="an unknown server error occurred, please try again later") - self.changed = True - - def _disable_zones(self, zones): - try: - self.elb.disable_zones(zones) - except boto.exception.BotoServerError, e: - if "Invalid Availability Zone" in e.error_message: - self.module.fail_json(msg=e.error_message) - else: - self.module.fail_json(msg="an unknown server error occurred, please try again later") - self.changed = True - - def _attach_subnets(self, subnets): - self.elb_conn.attach_lb_to_subnets(self.name, subnets) - self.changed = True - - def _detach_subnets(self, subnets): - self.elb_conn.detach_lb_from_subnets(self.name, subnets) - self.changed = True - - def _set_subnets(self): - """Determine which subnets need to be attached or detached on the ELB""" - if self.subnets: - if self.purge_subnets: - subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets)) - subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets)) - else: - subnets_to_detach = None - subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets)) - - if subnets_to_attach: - self._attach_subnets(subnets_to_attach) - if subnets_to_detach: - self._detach_subnets(subnets_to_detach) - - def _set_zones(self): - """Determine which zones need to be enabled or disabled on the ELB""" - if
self.zones: - if self.purge_zones: - zones_to_disable = list(set(self.elb.availability_zones) - - set(self.zones)) - zones_to_enable = list(set(self.zones) - - set(self.elb.availability_zones)) - else: - zones_to_disable = None - zones_to_enable = list(set(self.zones) - - set(self.elb.availability_zones)) - if zones_to_enable: - self._enable_zones(zones_to_enable) - # N.B. This must come second, in case it would have removed all zones - if zones_to_disable: - self._disable_zones(zones_to_disable) - - def _set_security_groups(self): - if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids): - self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids) - self.changed = True - - def _set_health_check(self): - """Set health check values on ELB as needed""" - if self.health_check: - # This just makes it easier to compare each of the attributes - # and look for changes. Keys are attributes of the current - # health_check; values are desired values of new health_check - health_check_config = { - "target": self._get_health_check_target(), - "timeout": self.health_check['response_timeout'], - "interval": self.health_check['interval'], - "unhealthy_threshold": self.health_check['unhealthy_threshold'], - "healthy_threshold": self.health_check['healthy_threshold'], - } - - update_health_check = False - - # The health_check attribute is *not* set on newly created - # ELBs! So we have to create our own. - if not self.elb.health_check: - self.elb.health_check = HealthCheck() - - for attr, desired_value in health_check_config.iteritems(): - if getattr(self.elb.health_check, attr) != desired_value: - setattr(self.elb.health_check, attr, desired_value) - update_health_check = True - - if update_health_check: - self.elb.configure_health_check(self.elb.health_check) - self.changed = True - - def _check_attribute_support(self, attr): - return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr) - - def _set_cross_az_load_balancing(self): - attributes = self.elb.get_attributes() - if self.cross_az_load_balancing: - attributes.cross_zone_load_balancing.enabled = True - else: - attributes.cross_zone_load_balancing.enabled = False - self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing', - attributes.cross_zone_load_balancing.enabled) - - def _set_connection_draining_timeout(self): - attributes = self.elb.get_attributes() - if self.connection_draining_timeout is not None: - attributes.connection_draining.enabled = True - attributes.connection_draining.timeout = self.connection_draining_timeout - self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) - else: - attributes.connection_draining.enabled = False - self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) - - def _get_health_check_target(self): - """Compose target string from healthcheck parameters""" - protocol = self.health_check['ping_protocol'].upper() - path = "" - - if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check: - path = self.health_check['ping_path'] - - return "%s:%s%s" % (protocol, self.health_check['ping_port'], path) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - state={'required': True, 'choices': ['present', 'absent']}, - name={'required': True}, - listeners={'default': None, 'required': False, 'type': 'list'}, - purge_listeners={'default': True, 'required': False, 'type': 'bool'}, - zones={'default': None, 'required':
False, 'type': 'list'}, - purge_zones={'default': False, 'required': False, 'type': 'bool'}, - security_group_ids={'default': None, 'required': False, 'type': 'list'}, - health_check={'default': None, 'required': False, 'type': 'dict'}, - subnets={'default': None, 'required': False, 'type': 'list'}, - purge_subnets={'default': False, 'required': False, 'type': 'bool'}, - scheme={'default': 'internet-facing', 'required': False}, - connection_draining_timeout={'default': None, 'required': False}, - cross_az_load_balancing={'default': None, 'required': False} - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: - module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") - - name = module.params['name'] - state = module.params['state'] - listeners = module.params['listeners'] - purge_listeners = module.params['purge_listeners'] - zones = module.params['zones'] - purge_zones = module.params['purge_zones'] - security_group_ids = module.params['security_group_ids'] - health_check = module.params['health_check'] - subnets = module.params['subnets'] - purge_subnets = module.params['purge_subnets'] - scheme = module.params['scheme'] - connection_draining_timeout = module.params['connection_draining_timeout'] - cross_az_load_balancing = module.params['cross_az_load_balancing'] - - if state == 'present' and not listeners: - module.fail_json(msg="At least one port is required for ELB creation") - - if state == 'present' and not (zones or subnets): - module.fail_json(msg="At least one availability zone or subnet is required for ELB creation") - - elb_man = ElbManager(module, name, listeners, purge_listeners, zones, - purge_zones, security_group_ids, health_check, - subnets, purge_subnets, scheme, - connection_draining_timeout, cross_az_load_balancing, - region=region, **aws_connect_params) - - # check for unsupported attributes for this version of boto - if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'): - module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute") - - if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'): - module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute") - - if state == 'present': - elb_man.ensure_ok() - elif state == 'absent': - elb_man.ensure_gone() - - ansible_facts = {'ec2_elb': 'info'} - ec2_facts_result = dict(changed=elb_man.changed, - elb=elb_man.get_info(), - ansible_facts=ansible_facts) - - module.exit_json(**ec2_facts_result) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_facts b/library/cloud/ec2_facts deleted file mode 100644 index 227f7140697..00000000000 --- a/library/cloud/ec2_facts +++ /dev/null @@ -1,180 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ec2_facts -short_description: Gathers facts about remote hosts within ec2 (aws) -version_added: "1.0" -options: - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 -description: - - This module fetches data from the metadata servers in ec2 (aws). - Eucalyptus cloud provides a similar service and this module should - work with this cloud provider as well. -notes: - - Parameters to filter on ec2_facts may be added later. -author: "Silviu Dicu " -''' - -EXAMPLES = ''' -# Conditional example -- name: Gather facts - action: ec2_facts - -- name: Conditional - action: debug msg="This instance is a t1.micro" - when: ansible_ec2_instance_type == "t1.micro" -''' - -import socket -import re - -socket.setdefaulttimeout(5) - -class Ec2Metadata(object): - - ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/' - ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key' - ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/' - - AWS_REGIONS = ('ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2', - 'us-gov-west-1' - ) - - def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None): - self.module = module - self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri - self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri - self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri - self._data = {} - self._prefix = 'ansible_ec2_%s' - - def _fetch(self, url): - (response, info) = fetch_url(self.module, url, force=True) - if response: - data = response.read() - else: - data = None - return data - - def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']): - new_fields = {} - for key, value in fields.iteritems(): - split_fields = key[len(uri):].split('/') - if len(split_fields) > 1 and split_fields[1]: - new_key = "-".join(split_fields) - new_fields[self._prefix % new_key] = value - else: - new_key = "".join(split_fields) - new_fields[self._prefix % new_key] = value - for pattern in filter_patterns: - for key in new_fields.keys(): - match = re.search(pattern, key) - if match: - new_fields.pop(key) - return new_fields - - def fetch(self, uri, recurse=True): - raw_subfields = self._fetch(uri) - if not raw_subfields: - return - subfields = raw_subfields.split('\n') - for field in subfields: - if field.endswith('/') and recurse: - self.fetch(uri + field) - if uri.endswith('/'): - new_uri = uri + field - else: - new_uri = uri + '/' + field - if new_uri not in self._data and not new_uri.endswith('/'): - content = self._fetch(new_uri) - if field == 'security-groups': - sg_fields = ",".join(content.split('\n')) - self._data['%s' % (new_uri)] = sg_fields - else: - self._data['%s' % (new_uri)] = content - - def fix_invalid_varnames(self, data): - """Change ':' and '-' to '_' to ensure valid template variable names""" - for (key, value) in data.items(): - if ':' in key
or '-' in key: - newkey = key.replace(':','_').replace('-','_') - del data[key] - data[newkey] = value - - def add_ec2_region(self, data): - """Use the 'ansible_ec2_placement_availability_zone' key/value - pair to add 'ansible_ec2_placement_region' key/value pair with - the EC2 region name. - """ - - # Only add a 'ansible_ec2_placement_region' key if the - # 'ansible_ec2_placement_availability_zone' exists. - zone = data.get('ansible_ec2_placement_availability_zone') - if zone is not None: - # Use the zone name as the region name unless the zone - # name starts with a known AWS region name. - region = zone - for r in self.AWS_REGIONS: - if zone.startswith(r): - region = r - break - data['ansible_ec2_placement_region'] = region - - def run(self): - self.fetch(self.uri_meta) # populate _data - data = self._mangle_fields(self._data, self.uri_meta) - data[self._prefix % 'user-data'] = self._fetch(self.uri_user) - data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh) - self.fix_invalid_varnames(data) - self.add_ec2_region(data) - return data - -def main(): - argument_spec = url_argument_spec() - - module = AnsibleModule( - argument_spec = argument_spec, - supports_check_mode = True, - ) - - ec2_facts = Ec2Metadata(module).run() - ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts) - - module.exit_json(**ec2_facts_result) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/cloud/ec2_group b/library/cloud/ec2_group deleted file mode 100644 index 1c8aa701015..00000000000 --- a/library/cloud/ec2_group +++ /dev/null @@ -1,386 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - - -DOCUMENTATION = ''' ---- -module: ec2_group -version_added: "1.3" -short_description: maintain an ec2 VPC security group. -description: - - maintains ec2 security groups. This module has a dependency on python-boto >= 2.5 -options: - name: - description: - - Name of the security group. - required: true - description: - description: - - Description of the security group. - required: true - vpc_id: - description: - - ID of the VPC to create the group in. - required: false - rules: - description: - - List of firewall inbound rules to enforce in this group (see example). - required: false - rules_egress: - description: - - List of firewall outbound rules to enforce in this group (see example). - required: false - version_added: "1.6" - region: - description: - - the EC2 region to use - required: false - default: null - aliases: [] - state: - version_added: "1.4" - description: - - Create or delete a security group - required: false - default: 'present' - choices: [ "present", "absent" ] - aliases: [] - purge_rules: - version_added: "1.8" - description: - - Purge existing rules on security group that are not found in rules - required: false - default: 'true' - aliases: [] - purge_rules_egress: - version_added: "1.8" - description: - - Purge existing egress rules on security group that are not found in rules_egress - required: false - default: 'true' - aliases: [] - -extends_documentation_fragment: aws - -notes: - - If a rule declares a group_name and that group doesn't exist, it will be - automatically created. In that case, group_desc should be provided as well. - The module will refuse to create a depended-on group without a description.
-''' - -EXAMPLES = ''' -- name: example ec2 group - local_action: - module: ec2_group - name: example - description: an example EC2 group - vpc_id: 12345 - region: eu-west-1a - aws_secret_key: SECRET - aws_access_key: ACCESS - rules: - - proto: tcp - from_port: 80 - to_port: 80 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 22 - to_port: 22 - cidr_ip: 10.0.0.0/8 - - proto: udp - from_port: 10050 - to_port: 10050 - cidr_ip: 10.0.0.0/8 - - proto: udp - from_port: 10051 - to_port: 10051 - group_id: sg-12345678 - - proto: all - # the containing group name may be specified here - group_name: example - rules_egress: - - proto: tcp - from_port: 80 - to_port: 80 - group_name: example-other - # description to use if example-other needs to be created - group_desc: other example EC2 group -''' - -try: - import boto.ec2 -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -def addRulesToLookup(rules, prefix, dict): - for rule in rules: - for grant in rule.grants: - dict["%s-%s-%s-%s-%s-%s" % (prefix, rule.ip_protocol, rule.from_port, rule.to_port, - grant.group_id, grant.cidr_ip)] = rule - - -def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): - """ - Returns tuple of (group_id, ip, target_group_created) after validating rule params. - - rule: Dict describing a rule. - name: Name of the security group being managed. - groups: Dict of all available security groups. - - AWS accepts an ip range or a security group as target of a rule. This - function validates the rule specification and returns either a non-None - group_id or a non-None ip range. - """ - - group_id = None - group_name = None - ip = None - target_group_created = False - if 'group_id' in rule and 'cidr_ip' in rule: - module.fail_json(msg="Specify group_id OR cidr_ip, not both") - elif 'group_name' in rule and 'cidr_ip' in rule: - module.fail_json(msg="Specify group_name OR cidr_ip, not both") - elif 'group_id' in rule and 'group_name' in rule: - module.fail_json(msg="Specify group_id OR group_name, not both") - elif 'group_id' in rule: - group_id = rule['group_id'] - elif 'group_name' in rule: - group_name = rule['group_name'] - if group_name in groups: - group_id = groups[group_name].id - elif group_name == name: - group_id = group.id - groups[group_id] = group - groups[group_name] = group - else: - if not rule.get('group_desc', '').strip(): - module.fail_json(msg="group %s will be automatically created by rule %s and no description was provided" % (group_name, rule)) - if not module.check_mode: - auto_group = ec2.create_security_group(group_name, rule['group_desc'], vpc_id=vpc_id) - group_id = auto_group.id - groups[group_id] = auto_group - groups[group_name] = auto_group - target_group_created = True - elif 'cidr_ip' in rule: - ip = rule['cidr_ip'] - - return group_id, ip, target_group_created - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - name=dict(required=True), - description=dict(required=True), - vpc_id=dict(), - rules=dict(), - rules_egress=dict(), - state = dict(default='present', choices=['present', 'absent']), - purge_rules=dict(default=True, required=False, type='bool'), - purge_rules_egress=dict(default=True, required=False, type='bool'), - - ) - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - name = module.params['name'] - description = module.params['description'] - vpc_id = module.params['vpc_id'] - rules = module.params['rules'] - rules_egress = module.params['rules_egress'] - state =
module.params.get('state') - purge_rules = module.params['purge_rules'] - purge_rules_egress = module.params['purge_rules_egress'] - - changed = False - - ec2 = ec2_connect(module) - - # find the group if present - group = None - groups = {} - for curGroup in ec2.get_all_security_groups(): - groups[curGroup.id] = curGroup - groups[curGroup.name] = curGroup - - if curGroup.name == name and (vpc_id is None or curGroup.vpc_id == vpc_id): - group = curGroup - - # Ensure requested group is absent - if state == 'absent': - if group: - '''found a match, delete it''' - try: - group.delete() - except Exception, e: - module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e)) - else: - group = None - changed = True - else: - '''no match found, no changes required''' - - # Ensure requested group is present - elif state == 'present': - if group: - '''existing group found''' - # check the group parameters are correct - group_in_use = False - rs = ec2.get_all_instances() - for r in rs: - for i in r.instances: - group_in_use |= reduce(lambda x, y: x | (y.name == group.name), i.groups, False) - - if group.description != description: - if group_in_use: - module.fail_json(msg="Group description does not match, but it is in use so cannot be changed.") - - # if the group doesn't exist, create it now - else: - '''no match found, create it''' - if not module.check_mode: - group = ec2.create_security_group(name, description, vpc_id=vpc_id) - - # When a group is created, an egress_rule ALLOW ALL - # to 0.0.0.0/0 is added automatically but it's not - # reflected in the object returned by the AWS API - # call. We re-read the group to get an updated object - # amazon sometimes takes a couple of seconds to update the security group so wait till it exists - while len(ec2.get_all_security_groups(filters={ 'group_id': group.id, })) == 0: - time.sleep(0.1) - - group = ec2.get_all_security_groups(group_ids=(group.id,))[0] - changed = True - else: - module.fail_json(msg="Unsupported state requested: %s" % state) - - # create a lookup for all existing rules on the group - if group: - - # Manage ingress rules - groupRules = {} - addRulesToLookup(group.rules, 'in', groupRules) - - # Now, go through all provided rules and ensure they are there.
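# A sketch of the reconciliation scheme above, with illustrative values: each
# existing rule is flattened by addRulesToLookup into a string key of the form
# prefix-proto-from_port-to_port-group_id-cidr_ip, for example:
#
#     ruleId = "%s-%s-%s-%s-%s-%s" % ('in', 'tcp', 80, 80, None, '0.0.0.0/0')
#
# Desired rules delete their key from the lookup as they are matched, so
# whatever keys remain afterwards belong to rules that can be revoked when
# purge_rules is set.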
- if rules: - for rule in rules: - group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id) - if target_group_created: - changed = True - - if rule['proto'] in ('all', '-1', -1): - rule['proto'] = -1 - rule['from_port'] = None - rule['to_port'] = None - - # If rule already exists, don't later delete it - ruleId = "%s-%s-%s-%s-%s-%s" % ('in', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip) - if ruleId in groupRules: - del groupRules[ruleId] - # Otherwise, add new rule - else: - grantGroup = None - if group_id: - grantGroup = groups[group_id] - - if not module.check_mode: - group.authorize(rule['proto'], rule['from_port'], rule['to_port'], ip, grantGroup) - changed = True - - # Finally, remove anything left in the groupRules -- these will be defunct rules - if purge_rules: - for rule in groupRules.itervalues() : - for grant in rule.grants: - grantGroup = None - if grant.group_id: - grantGroup = groups[grant.group_id] - if not module.check_mode: - group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup) - changed = True - - # Manage egress rules - groupRules = {} - addRulesToLookup(group.rules_egress, 'out', groupRules) - - # Now, go through all provided rules and ensure they are there. - if rules_egress: - for rule in rules_egress: - group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id) - if target_group_created: - changed = True - - if rule['proto'] in ('all', '-1', -1): - rule['proto'] = -1 - rule['from_port'] = None - rule['to_port'] = None - - # If rule already exists, don't later delete it - ruleId = "%s-%s-%s-%s-%s-%s" % ('out', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip) - if ruleId in groupRules: - del groupRules[ruleId] - # Otherwise, add new rule - else: - grantGroup = None - if group_id: - grantGroup = groups[group_id].id - - if not module.check_mode: - ec2.authorize_security_group_egress( - group_id=group.id, - ip_protocol=rule['proto'], - from_port=rule['from_port'], - to_port=rule['to_port'], - src_group_id=grantGroup, - cidr_ip=ip) - changed = True - elif vpc_id and not module.check_mode: - # when using a vpc, but no egress rules are specified, - # we add in a default allow all out rule, which was the - # default behavior before egress rules were added - default_egress_rule = 'out--1-None-None-None-0.0.0.0/0' - if default_egress_rule not in groupRules: - ec2.authorize_security_group_egress( - group_id=group.id, - ip_protocol=-1, - from_port=None, - to_port=None, - src_group_id=None, - cidr_ip='0.0.0.0/0' - ) - changed = True - else: - # make sure the default egress rule is not removed - del groupRules[default_egress_rule] - - # Finally, remove anything left in the groupRules -- these will be defunct rules - if purge_rules_egress: - for rule in groupRules.itervalues(): - for grant in rule.grants: - grantGroup = None - if grant.group_id: - grantGroup = groups[grant.group_id].id - if not module.check_mode: - ec2.revoke_security_group_egress( - group_id=group.id, - ip_protocol=rule.ip_protocol, - from_port=rule.from_port, - to_port=rule.to_port, - src_group_id=grantGroup, - cidr_ip=grant.cidr_ip) - changed = True - - if group: - module.exit_json(changed=changed, group_id=group.id) - else: - module.exit_json(changed=changed, group_id=None) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_key b/library/cloud/ec2_key 
deleted file mode 100644 index 9c8274f764a..00000000000 --- a/library/cloud/ec2_key +++ /dev/null @@ -1,238 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - - -DOCUMENTATION = ''' ---- -module: ec2_key -version_added: "1.5" -short_description: maintain an ec2 key pair. -description: - - maintains ec2 key pairs. This module has a dependency on python-boto >= 2.5 -options: - name: - description: - - Name of the key pair. - required: true - key_material: - description: - - Public key material. - required: false - region: - description: - - the EC2 region to use - required: false - default: null - aliases: [] - state: - description: - - create or delete keypair - required: false - default: 'present' - aliases: [] - wait: - description: - - Wait for the specified action to complete before returning. - required: false - default: false - aliases: [] - version_added: "1.6" - wait_timeout: - description: - - How long before wait gives up, in seconds - required: false - default: 300 - aliases: [] - version_added: "1.6" - -extends_documentation_fragment: aws -author: Vincent Viallet -''' - -EXAMPLES = ''' -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. - -# Creates a new ec2 key pair named `example` if not present, returns generated -# private key -- name: example ec2 key - local_action: - module: ec2_key - name: example - -# Creates a new ec2 key pair named `example` if not present using provided key -# material -- name: example2 ec2 key - local_action: - module: ec2_key - name: example2 - key_material: 'ssh-rsa AAAAxyz...== me@example.com' - state: present - -# Creates a new ec2 key pair named `example` if not present using provided key -# material -- name: example3 ec2 key - local_action: - module: ec2_key - name: example3 - key_material: "{{ item }}" - with_file: /path/to/public_key.id_rsa.pub - -# Removes ec2 key pair by name -- name: remove example key - local_action: - module: ec2_key - name: example - state: absent -''' - -try: - import boto.ec2 -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -import random -import string - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - name=dict(required=True), - key_material=dict(required=False), - state = dict(default='present', choices=['present', 'absent']), - wait = dict(type='bool', default=False), - wait_timeout = dict(default=300), - ) - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - name = module.params['name'] - state = module.params.get('state') - key_material = module.params.get('key_material') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - changed = False - - ec2 = ec2_connect(module) - - # find the key if present - key = ec2.get_key_pair(name) - - # Ensure requested key is absent - if state == 'absent': - if key: - '''found a match, delete it''' - try: - key.delete() - if wait: - start = time.time() - action_complete = False - while (time.time() - start) < wait_timeout: - if not ec2.get_key_pair(name): - action_complete = True - break - time.sleep(1) - if not action_complete: - module.fail_json(msg="timed out while waiting for the key to be removed") - except Exception, e: - module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e)) - else: - key = None - changed = True - else: - '''no match found, no changes required''' - - # Ensure requested key is 
present - elif state == 'present': - if key: - # existing key found - if key_material: - # EC2's fingerprints are non-trivial to generate, so push this key - # to a temporary name and make ec2 calculate the fingerprint for us. - # - # http://blog.jbrowne.com/?p=23 - # https://forums.aws.amazon.com/thread.jspa?messageID=352828 - - # find an unused name - test = 'empty' - while test: - randomchars = [random.choice(string.ascii_letters + string.digits) for x in range(0,10)] - tmpkeyname = "ansible-" + ''.join(randomchars) - test = ec2.get_key_pair(tmpkeyname) - - # create tmp key - tmpkey = ec2.import_key_pair(tmpkeyname, key_material) - # get tmp key fingerprint - tmpfingerprint = tmpkey.fingerprint - # delete tmp key - tmpkey.delete() - - if key.fingerprint != tmpfingerprint: - if not module.check_mode: - key.delete() - key = ec2.import_key_pair(name, key_material) - - if wait: - start = time.time() - action_complete = False - while (time.time() - start) < wait_timeout: - if ec2.get_key_pair(name): - action_complete = True - break - time.sleep(1) - if not action_complete: - module.fail_json(msg="timed out while waiting for the key to be re-created") - - changed = True - pass - - # if the key doesn't exist, create it now - else: - '''no match found, create it''' - if not module.check_mode: - if key_material: - '''We are providing the key, need to import''' - key = ec2.import_key_pair(name, key_material) - else: - ''' - No material provided, let AWS handle the key creation and - retrieve the private key - ''' - key = ec2.create_key_pair(name) - - if wait: - start = time.time() - action_complete = False - while (time.time() - start) < wait_timeout: - if ec2.get_key_pair(name): - action_complete = True - break - time.sleep(1) - if not action_complete: - module.fail_json(msg="timed out while waiting for the key to be created") - - changed = True - - if key: - data = { - 'name': key.name, - 'fingerprint': key.fingerprint - } - if key.material: - data.update({'private_key': key.material}) - - module.exit_json(changed=changed, key=data) - else: - module.exit_json(changed=changed, key=None) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_lc b/library/cloud/ec2_lc deleted file mode 100755 index b58eabd53e7..00000000000 --- a/library/cloud/ec2_lc +++ /dev/null @@ -1,274 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
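The fingerprint comparison in the ec2_key logic above deserves a note: EC2 computes key fingerprints server-side, so the module imports the candidate material under a throwaway name, reads the fingerprint EC2 assigns to it, and only replaces the real key on a mismatch. A minimal sketch of that idea (the helper name is illustrative, and `conn` is assumed to be an already-open boto EC2 connection like the module's `ec2` above):

    import random
    import string

    def key_material_matches(conn, name, key_material):
        """Return True if key_material has the same fingerprint as key pair `name`."""
        existing = conn.get_key_pair(name)
        if existing is None:
            return False
        # Find a temporary key name that is not already taken.
        tmp_name = 'ansible-' + ''.join(
            random.choice(string.ascii_letters + string.digits) for _ in range(10))
        while conn.get_key_pair(tmp_name):
            tmp_name = 'ansible-' + ''.join(
                random.choice(string.ascii_letters + string.digits) for _ in range(10))
        # Import under the throwaway name so EC2 calculates the fingerprint,
        # then clean up the temporary key whatever the outcome.
        tmp_key = conn.import_key_pair(tmp_name, key_material)
        try:
            return tmp_key.fingerprint == existing.fingerprint
        finally:
            tmp_key.delete()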
- -DOCUMENTATION = """ ---- -module: ec2_lc -short_description: Create or delete AWS Autoscaling Launch Configurations -description: - - Can create or delete AwS Autoscaling Configurations - - Works with the ec2_asg module to manage Autoscaling Groups -version_added: "1.6" -author: Gareth Rushgrove -options: - state: - description: - - register or deregister the instance - required: true - choices: ['present', 'absent'] - name: - description: - - Unique name for configuration - required: true - instance_type: - description: - - instance type to use for the instance - required: true - default: null - aliases: [] - image_id: - description: - - The AMI unique identifier to be used for the group - required: false - key_name: - description: - - The SSH key name to be used for access to managed instances - required: false - security_groups: - description: - - A list of security groups into which instances should be found - required: false - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - volumes: - description: - - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. - required: false - default: null - aliases: [] - user_data: - description: - - opaque blob of data which is made available to the ec2 instance - required: false - default: null - aliases: [] - kernel_id: - description: - - Kernel id for the EC2 instance - required: false - default: null - aliases: [] - spot_price: - description: - - The spot price you are bidding. Only applies for an autoscaling group with spot instances. - required: false - default: null - instance_monitoring: - description: - - whether instances in group are launched with detailed monitoring. - required: false - default: false - aliases: [] - assign_public_ip: - description: - - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC. - required: false - default: false - aliases: [] - version_added: "1.8" - ramdisk_id: - description: - - A RAM disk id for the instances. - required: false - default: null - aliases: [] - version_added: "1.8" - instance_profile_name: - description: - - The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instances. - required: false - default: null - aliases: [] - version_added: "1.8" - ebs_optimized: - description: - - Specifies whether the instance is optimized for EBS I/O (true) or not (false). 
- required: false - default: false - aliases: [] - version_added: "1.8" -extends_documentation_fragment: aws -""" - -EXAMPLES = ''' -- ec2_lc: - name: special - image_id: ami-XXX - key_name: default - security_groups: 'group,group2' - instance_type: t1.micro - -''' - -import sys -import time - -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -try: - from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping - import boto.ec2.autoscale - from boto.ec2.autoscale import LaunchConfiguration - from boto.exception import BotoServerError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -def create_block_device(module, volume): - # Not aware of a way to determine this programmatically - # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/ - MAX_IOPS_TO_SIZE_RATIO = 30 - if 'snapshot' not in volume and 'ephemeral' not in volume: - if 'volume_size' not in volume: - module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume') - if 'snapshot' in volume: - if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume: - module.fail_json(msg='io1 volumes must have an iops value set') - if 'ephemeral' in volume: - if 'snapshot' in volume: - module.fail_json(msg='Cannot set both ephemeral and snapshot') - return BlockDeviceType(snapshot_id=volume.get('snapshot'), - ephemeral_name=volume.get('ephemeral'), - size=volume.get('volume_size'), - volume_type=volume.get('device_type'), - delete_on_termination=volume.get('delete_on_termination', False), - iops=volume.get('iops')) - - -def create_launch_config(connection, module): - name = module.params.get('name') - image_id = module.params.get('image_id') - key_name = module.params.get('key_name') - security_groups = module.params['security_groups'] - user_data = module.params.get('user_data') - volumes = module.params['volumes'] - instance_type = module.params.get('instance_type') - spot_price = module.params.get('spot_price') - instance_monitoring = module.params.get('instance_monitoring') - assign_public_ip = module.params.get('assign_public_ip') - kernel_id = module.params.get('kernel_id') - ramdisk_id = module.params.get('ramdisk_id') - instance_profile_name = module.params.get('instance_profile_name') - ebs_optimized = module.params.get('ebs_optimized') - bdm = BlockDeviceMapping() - - if volumes: - for volume in volumes: - if 'device_name' not in volume: - module.fail_json(msg='Device name must be set for volume') - # Minimum volume size is 1GB.
We'll use volume size explicitly set to 0 - # to be a signal not to create this volume - if 'volume_size' not in volume or int(volume['volume_size']) > 0: - bdm[volume['device_name']] = create_block_device(module, volume) - - lc = LaunchConfiguration( - name=name, - image_id=image_id, - key_name=key_name, - security_groups=security_groups, - user_data=user_data, - block_device_mappings=[bdm], - instance_type=instance_type, - kernel_id=kernel_id, - spot_price=spot_price, - instance_monitoring=instance_monitoring, - associate_public_ip_address = assign_public_ip, - ramdisk_id=ramdisk_id, - instance_profile_name=instance_profile_name, - ebs_optimized=ebs_optimized, - ) - - launch_configs = connection.get_all_launch_configurations(names=[name]) - changed = False - if not launch_configs: - try: - connection.create_launch_configuration(lc) - launch_configs = connection.get_all_launch_configurations(names=[name]) - changed = True - except BotoServerError, e: - module.fail_json(msg=str(e)) - result = launch_configs[0] - - module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time), - image_id=result.image_id, arn=result.launch_configuration_arn, - security_groups=result.security_groups, instance_type=instance_type) - - -def delete_launch_config(connection, module): - name = module.params.get('name') - launch_configs = connection.get_all_launch_configurations(names=[name]) - if launch_configs: - launch_configs[0].delete() - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name=dict(required=True, type='str'), - image_id=dict(type='str'), - key_name=dict(type='str'), - security_groups=dict(type='list'), - user_data=dict(type='str'), - kernel_id=dict(type='str'), - volumes=dict(type='list'), - instance_type=dict(type='str'), - state=dict(default='present', choices=['present', 'absent']), - spot_price=dict(type='float'), - ramdisk_id=dict(type='str'), - instance_profile_name=dict(type='str'), - ebs_optimized=dict(default=False, type='bool'), - associate_public_ip_address=dict(type='bool'), - instance_monitoring=dict(default=False, type='bool'), - assign_public_ip=dict(default=False, type='bool') - ) - ) - - module = AnsibleModule(argument_spec=argument_spec) - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - - try: - connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - - state = module.params.get('state') - - if state == 'present': - create_launch_config(connection, module) - elif state == 'absent': - delete_launch_config(connection, module) - -main() diff --git a/library/cloud/ec2_metric_alarm b/library/cloud/ec2_metric_alarm deleted file mode 100644 index 519f88f24f8..00000000000 --- a/library/cloud/ec2_metric_alarm +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ -module: ec2_metric_alarm -short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'" -description: - - Can create or delete AWS metric alarms - - Metrics you wish to alarm on must already exist -version_added: "1.6" -author: Zacharie Eakin -options: - state: - description: - - register or deregister the alarm - required: true - choices: ['present', 'absent'] - name: - description: - - Unique name for the alarm - required: true - metric: - description: - - Name of the monitored metric (e.g. CPUUtilization) - - Metric must already exist - required: false - namespace: - description: - - Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch - required: false - statistic: - description: - - Operation applied to the metric - - Works in conjunction with period and evaluation_periods to determine the comparison value - required: false - options: ['SampleCount','Average','Sum','Minimum','Maximum'] - comparison: - description: - - Determines how the threshold value is compared - required: false - options: ['<=','<','>','>='] - threshold: - description: - - Sets the min/max bound for triggering the alarm - required: false - period: - description: - - The time (in seconds) between metric evaluations - required: false - evaluation_periods: - description: - - The number of times in which the metric is evaluated before final calculation - required: false - unit: - description: - - The threshold's unit of measurement - required: false - options: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None'] - description: - description: - - A longer description of the alarm - required: false - dimensions: - description: - - Describes to what the alarm is applied - required: false - alarm_actions: - description: - - A list of the names of action(s) taken when the alarm is in the 'alarm' status - required: false - insufficient_data_actions: - description: - - A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status - required: false - ok_actions: - description: - - A list of the names of action(s) to take when the alarm is in the 'ok' status - required: false -extends_documentation_fragment: aws -""" - -EXAMPLES = ''' - - name: create alarm - ec2_metric_alarm: - state: present - region: ap-southeast-2 - name: "cpu-low" - metric: "CPUUtilization" - namespace: "AWS/EC2" - statistic: Average - comparison: "<=" - threshold: 5.0 - period: 300 - evaluation_periods: 3 - unit: "Percent" - description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes " - dimensions: {'InstanceId':'i-XXX'} - alarm_actions: ["action1","action2"] - - -''' - -import sys - -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -try: - import boto.ec2.cloudwatch - from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm - from boto.exception import BotoServerError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -def create_metric_alarm(connection, module): - -
name = module.params.get('name') - metric = module.params.get('metric') - namespace = module.params.get('namespace') - statistic = module.params.get('statistic') - comparison = module.params.get('comparison') - threshold = module.params.get('threshold') - period = module.params.get('period') - evaluation_periods = module.params.get('evaluation_periods') - unit = module.params.get('unit') - description = module.params.get('description') - dimensions = module.params.get('dimensions') - alarm_actions = module.params.get('alarm_actions') - insufficient_data_actions = module.params.get('insufficient_data_actions') - ok_actions = module.params.get('ok_actions') - - alarms = connection.describe_alarms(alarm_names=[name]) - - if not alarms: - - alm = MetricAlarm( - name=name, - metric=metric, - namespace=namespace, - statistic=statistic, - comparison=comparison, - threshold=threshold, - period=period, - evaluation_periods=evaluation_periods, - unit=unit, - description=description, - dimensions=dimensions, - alarm_actions=alarm_actions, - insufficient_data_actions=insufficient_data_actions, - ok_actions=ok_actions - ) - try: - connection.create_alarm(alm) - changed = True - alarms = connection.describe_alarms(alarm_names=[name]) - except BotoServerError, e: - module.fail_json(msg=str(e)) - - else: - alarm = alarms[0] - changed = False - - for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'): - if getattr(alarm, attr) != module.params.get(attr): - changed = True - setattr(alarm, attr, module.params.get(attr)) - #this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm - comparison = alarm.comparison - comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'} - alarm.comparison = comparisons[comparison] - - dim1 = module.params.get('dimensions') - dim2 = alarm.dimensions - - for keys in dim1: - if not isinstance(dim1[keys], list): - dim1[keys] = [dim1[keys]] - if dim1[keys] != dim2[keys]: - changed=True - setattr(alarm, 'dimensions', dim1) - - for attr in ('alarm_actions','insufficient_data_actions','ok_actions'): - action = module.params.get(attr) or [] - if getattr(alarm, attr) != action: - changed = True - setattr(alarm, attr, module.params.get(attr)) - - try: - if changed: - connection.create_alarm(alarm) - except BotoServerError, e: - module.fail_json(msg=str(e)) - result = alarms[0] - module.exit_json(changed=changed, name=result.name, - actions_enabled=result.actions_enabled, - alarm_actions=result.alarm_actions, - alarm_arn=result.alarm_arn, - comparison=result.comparison, - description=result.description, - dimensions=result.dimensions, - evaluation_periods=result.evaluation_periods, - insufficient_data_actions=result.insufficient_data_actions, - last_updated=result.last_updated, - metric=result.metric, - namespace=result.namespace, - ok_actions=result.ok_actions, - period=result.period, - state_reason=result.state_reason, - state_value=result.state_value, - statistic=result.statistic, - threshold=result.threshold, - unit=result.unit) - -def delete_metric_alarm(connection, module): - name = module.params.get('name') - - alarms = connection.describe_alarms(alarm_names=[name]) - - if alarms: - try: - connection.delete_alarms([name]) - module.exit_json(changed=True) - except BotoServerError, e: - module.fail_json(msg=str(e)) - else: - module.exit_json(changed=False) - - -def 
main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name=dict(required=True, type='str'), - metric=dict(type='str'), - namespace=dict(type='str'), - statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']), - comparison=dict(type='str', choices=['<=', '<', '>', '>=']), - threshold=dict(type='float'), - period=dict(type='int'), - unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']), - evaluation_periods=dict(type='int'), - description=dict(type='str'), - dimensions=dict(type='dict'), - alarm_actions=dict(type='list'), - insufficient_data_actions=dict(type='list'), - ok_actions=dict(type='list'), - state=dict(default='present', choices=['present', 'absent']), - region=dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), - ) - ) - - module = AnsibleModule(argument_spec=argument_spec) - - state = module.params.get('state') - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - try: - connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - - if state == 'present': - create_metric_alarm(connection, module) - elif state == 'absent': - delete_metric_alarm(connection, module) - -main() diff --git a/library/cloud/ec2_scaling_policy b/library/cloud/ec2_scaling_policy deleted file mode 100755 index ad1fa7ce7f1..00000000000 --- a/library/cloud/ec2_scaling_policy +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/python - -DOCUMENTATION = """ -module: ec2_scaling_policy -short_description: Create or delete AWS scaling policies for Autoscaling groups -description: - - Can create or delete scaling policies for autoscaling groups - - Referenced autoscaling groups must already exist -version_added: "1.6" -author: Zacharie Eakin -options: - state: - description: - - register or deregister the policy - required: true - choices: ['present', 'absent'] - name: - description: - - Unique name for the scaling policy - required: true - asg_name: - description: - - Name of the associated autoscaling group - required: true - adjustment_type: - description: - - The type of change in capacity of the autoscaling group - required: false - choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity'] - scaling_adjustment: - description: - - The amount by which the autoscaling group is adjusted by the policy - required: false - min_adjustment_step: - description: - - Minimum amount of adjustment when policy is triggered - required: false - cooldown: - description: - - The minimum period of time between which autoscaling actions can take place - required: false -extends_documentation_fragment: aws -""" - -EXAMPLES = ''' -- ec2_scaling_policy: - state: present - region: US-XXX - name: "scaledown-policy" - adjustment_type: "ChangeInCapacity" - asg_name: "slave-pool" - scaling_adjustment: -1 - min_adjustment_step: 1 - cooldown: 300 -''' - - -import sys - -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -try: - import boto.ec2.autoscale - from boto.ec2.autoscale import ScalingPolicy - from boto.exception import BotoServerError -
-except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -def create_scaling_policy(connection, module): - sp_name = module.params.get('name') - adjustment_type = module.params.get('adjustment_type') - asg_name = module.params.get('asg_name') - scaling_adjustment = module.params.get('scaling_adjustment') - min_adjustment_step = module.params.get('min_adjustment_step') - cooldown = module.params.get('cooldown') - - scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name]) - - if not scalingPolicies: - sp = ScalingPolicy( - name=sp_name, - adjustment_type=adjustment_type, - as_name=asg_name, - scaling_adjustment=scaling_adjustment, - min_adjustment_step=min_adjustment_step, - cooldown=cooldown) - - try: - connection.create_scaling_policy(sp) - policy = connection.get_all_policies(policy_names=[sp_name])[0] - module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step) - except BotoServerError, e: - module.fail_json(msg=str(e)) - else: - policy = scalingPolicies[0] - changed = False - - # min_adjustment_step attribute is only relevant if the adjustment_type - # is set to percentage change in capacity, so it is a special case - if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity': - if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'): - changed = True - - # set the min adjustment step in case the user decided to change their - # adjustment type to percentage - setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step')) - - # check the remaining attributes - for attr in ('adjustment_type','scaling_adjustment','cooldown'): - if getattr(policy, attr) != module.params.get(attr): - changed = True - setattr(policy, attr, module.params.get(attr)) - - try: - if changed: - connection.create_scaling_policy(policy) - policy = connection.get_all_policies(policy_names=[sp_name])[0] - module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step) - except BotoServerError, e: - module.fail_json(msg=str(e)) - - -def delete_scaling_policy(connection, module): - sp_name = module.params.get('name') - asg_name = module.params.get('asg_name') - - scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name]) - - if scalingPolicies: - try: - connection.delete_policy(sp_name, asg_name) - module.exit_json(changed=True) - except BotoServerError, e: - module.exit_json(changed=False, msg=str(e)) - else: - module.exit_json(changed=False) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name = dict(required=True, type='str'), - adjustment_type = dict(type='str', choices=['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']), - asg_name = dict(required=True, type='str'), - scaling_adjustment = dict(type='int'), - min_adjustment_step = dict(type='int'), - cooldown = dict(type='int'), - region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), - state=dict(default='present', choices=['present', 'absent']), - ) - ) - - module = AnsibleModule(argument_spec=argument_spec) - - region, ec2_url, aws_connect_params =
get_aws_connection_info(module) - - state = module.params.get('state') - - try: - connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - if not connection: - module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region)) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - - if state == 'present': - create_scaling_policy(connection, module) - elif state == 'absent': - delete_scaling_policy(connection, module) - - -main() diff --git a/library/cloud/ec2_snapshot b/library/cloud/ec2_snapshot deleted file mode 100644 index a37aadb95e2..00000000000 --- a/library/cloud/ec2_snapshot +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ec2_snapshot -short_description: creates a snapshot from an existing volume -description: - - creates an EC2 snapshot from an existing EBS volume -version_added: "1.5" -options: - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - volume_id: - description: - - volume from which to take the snapshot - required: false - description: - description: - - description to be applied to the snapshot - required: false - instance_id: - description: - - instance that has the required volume to snapshot mounted - required: false - device_name: - description: - - device name of a mounted volume to be snapshotted - required: false - snapshot_tags: - description: - - a hash/dictionary of tags to add to the snapshot - required: false - version_added: "1.6" - -author: Will Thames -extends_documentation_fragment: aws -''' - -EXAMPLES = ''' -# Simple snapshot of volume using volume_id -- local_action: - module: ec2_snapshot - volume_id: vol-abcdef12 - description: snapshot of /data from DB123 taken 2013/11/28 12:18:32 - -# Snapshot of volume mounted on device_name attached to instance_id -- local_action: - module: ec2_snapshot - instance_id: i-12345678 - device_name: /dev/sdb1 - description: snapshot of /data from DB123 taken 2013/11/28 12:18:32 - -# Snapshot of volume with tagging -- local_action: - module: ec2_snapshot - instance_id: i-12345678 - device_name: /dev/sdb1 - snapshot_tags: - frequency: hourly - source: /data -''' - -import sys -import time - -try: - import boto.ec2 -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - volume_id = dict(), - description = dict(), - instance_id = dict(), - device_name = dict(), - wait = dict(type='bool', default='true'), - wait_timeout = dict(default=0), - snapshot_tags = dict(type='dict', default=dict()), - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - volume_id = module.params.get('volume_id') - description = 
module.params.get('description') - instance_id = module.params.get('instance_id') - device_name = module.params.get('device_name') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - snapshot_tags = module.params.get('snapshot_tags') - - if (not volume_id and not instance_id) or (volume_id and instance_id): - module.fail_json(msg='One and only one of volume_id or instance_id must be specified') - if (instance_id and not device_name) or (device_name and not instance_id): - module.fail_json(msg='Instance ID and device name must both be specified') - - ec2 = ec2_connect(module) - - if instance_id: - try: - volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name}) - if not volumes: - module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id)) - volume_id = volumes[0].id - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - try: - snapshot = ec2.create_snapshot(volume_id, description=description) - time_waited = 0 - if wait: - snapshot.update() - while snapshot.status != 'completed': - time.sleep(3) - snapshot.update() - time_waited += 3 - if wait_timeout and time_waited > wait_timeout: - module.fail_json(msg='Timed out while creating snapshot.') - for k, v in snapshot_tags.items(): - snapshot.add_tag(k, v) - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - module.exit_json(changed=True, snapshot_id=snapshot.id, volume_id=snapshot.volume_id, - volume_size=snapshot.volume_size, tags=snapshot.tags.copy()) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_tag b/library/cloud/ec2_tag deleted file mode 100644 index 4a33112189a..00000000000 --- a/library/cloud/ec2_tag +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ec2_tag -short_description: create and remove tag(s) on ec2 resources. -description: - - Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto. -version_added: "1.3" -options: - resource: - description: - - The EC2 resource id. - required: true - default: null - aliases: [] - state: - description: - - Whether the tags should be present or absent on the resource. Use list to interrogate the tags of an instance. - required: false - default: present - choices: ['present', 'absent', 'list'] - aliases: [] - region: - description: - - region in which the resource exists.
- required: false - default: null - aliases: ['aws_region', 'ec2_region'] - -author: Lester Wade -extends_documentation_fragment: aws -''' - -EXAMPLES = ''' -# Basic example of adding tag(s) -tasks: -- name: tag a resource - local_action: ec2_tag resource=vol-XXXXXX region=eu-west-1 state=present - args: - tags: - Name: ubervol - env: prod - -# Playbook example of adding tag(s) to spawned instances -tasks: -- name: launch some instances - local_action: ec2 keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image_id }} wait=true region=eu-west-1 - register: ec2 - -- name: tag my launched instances - local_action: ec2_tag resource={{ item.id }} region=eu-west-1 state=present - with_items: ec2.instances - args: - tags: - Name: webserver - env: prod -''' - -# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes. -# if state=present and it doesn't exist, create, tag and attach. -# Check for state by looking for volume attachment with tag (and against block device mapping?). -# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3). - -import sys -import time - -try: - import boto.ec2 -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - resource = dict(required=True), - tags = dict(), - state = dict(default='present', choices=['present', 'absent', 'list']), - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - resource = module.params.get('resource') - tags = module.params.get('tags') - state = module.params.get('state') - - ec2 = ec2_connect(module) - - # We need a comparison here so that we can accurately report back changed status. - # Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate. - filters = {'resource-id' : resource} - gettags = ec2.get_all_tags(filters=filters) - - dictadd = {} - dictremove = {} - baddict = {} - tagdict = {} - for tag in gettags: - tagdict[tag.name] = tag.value - - if state == 'present': - if not tags: - module.fail_json(msg="tags argument is required when state is present") - if set(tags.items()).issubset(set(tagdict.items())): - module.exit_json(msg="Tags already exists in %s." %resource, changed=False) - else: - for (key, value) in set(tags.items()): - if (key, value) not in set(tagdict.items()): - dictadd[key] = value - tagger = ec2.create_tags(resource, dictadd) - gettags = ec2.get_all_tags(filters=filters) - module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True) - - if state == 'absent': - if not tags: - module.fail_json(msg="tags argument is required when state is absent") - for (key, value) in set(tags.items()): - if (key, value) not in set(tagdict.items()): - baddict[key] = value - if set(baddict) == set(tags): - module.exit_json(msg="Nothing to remove here. Move along.", changed=False) - for (key, value) in set(tags.items()): - if (key, value) in set(tagdict.items()): - dictremove[key] = value - tagger = ec2.delete_tags(resource, dictremove) - gettags = ec2.get_all_tags(filters=filters) - module.exit_json(msg="Tags %s removed for resource %s." 
% (dictremove,resource), changed=True) - - if state == 'list': - module.exit_json(changed=False, tags=tagdict) - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_vol b/library/cloud/ec2_vol deleted file mode 100644 index 49d3a601430..00000000000 --- a/library/cloud/ec2_vol +++ /dev/null @@ -1,434 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ec2_vol -short_description: create and attach a volume, return volume id and device map -description: - - creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name is given and the instance has a device at the device name, then no volume is created and no attachment is made. This module has a dependency on python-boto. -version_added: "1.1" -options: - instance: - description: - - instance ID if you wish to attach the volume. - required: false - default: null - aliases: [] - name: - description: - - volume Name tag if you wish to attach an existing volume (requires instance) - required: false - default: null - aliases: [] - version_added: "1.6" - id: - description: - - volume id if you wish to attach an existing volume (requires instance) or remove an existing volume - required: false - default: null - aliases: [] - version_added: "1.6" - volume_size: - description: - - size of volume (in GB) to create. - required: false - default: null - aliases: [] - iops: - description: - - the provisioned IOPs you want to associate with this volume (integer). - required: false - default: 100 - aliases: [] - version_added: "1.3" - encrypted: - description: - - Enable encryption at rest for this volume. - default: false - version_added: "1.8" - device_name: - description: - - device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows. - required: false - default: null - aliases: [] - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - default: null - aliases: ['aws_region', 'ec2_region'] - zone: - description: - - zone in which to create the volume, if unset uses the zone the instance is in (if set) - required: false - default: null - aliases: ['aws_zone', 'ec2_zone'] - snapshot: - description: - - snapshot ID on which to base the volume - required: false - default: null - version_added: "1.5" - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. 
- required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - state: - description: - - whether to ensure the volume is present or absent, or to list existing volumes - required: false - default: present - choices: ['absent', 'present', 'list'] - version_added: "1.6" -author: Lester Wade -extends_documentation_fragment: aws -''' - -EXAMPLES = ''' -# Simple attachment action -- local_action: - module: ec2_vol - instance: XXXXXX - volume_size: 5 - device_name: sdd - -# Example using custom iops params -- local_action: - module: ec2_vol - instance: XXXXXX - volume_size: 5 - iops: 200 - device_name: sdd - -# Example using snapshot id -- local_action: - module: ec2_vol - instance: XXXXXX - snapshot: "{{ snapshot }}" - -# Playbook example combined with instance launch -- local_action: - module: ec2 - keypair: "{{ keypair }}" - image: "{{ image }}" - wait: yes - count: 3 - register: ec2 -- local_action: - module: ec2_vol - instance: "{{ item.id }}" - volume_size: 5 - with_items: ec2.instances - register: ec2_vol - -# Example: Launch an instance and then add a volume if not already present -# * Nothing will happen if the volume is already attached. -# * Volume must exist in the same zone. - -- local_action: - module: ec2 - keypair: "{{ keypair }}" - image: "{{ image }}" - zone: YYYYYY - id: my_instance - wait: yes - count: 1 - register: ec2 - -- local_action: - module: ec2_vol - instance: "{{ item.id }}" - name: my_existing_volume_Name_tag - device_name: /dev/xvdf - with_items: ec2.instances - register: ec2_vol - -# Remove a volume -- local_action: - module: ec2_vol - id: vol-XXXXXXXX - state: absent - -# List volumes for an instance -- local_action: - module: ec2_vol - instance: i-XXXXXX - state: list -''' - -# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes. -# if state=present and it doesn't exist, create, tag and attach. -# Check for state by looking for volume attachment with tag (and against block device mapping?). -# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3). 
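The idempotency note above suggests keying off resource tags. A minimal sketch of that approach with boto (a hypothetical helper with illustrative names, not part of the module being removed) might look like:

    import boto.ec2

    def ensure_volume(ec2, name, zone, size_gb):
        # Find-or-create an EBS volume by its Name tag so repeated runs
        # do not create duplicates.
        existing = ec2.get_all_volumes(filters={'tag:Name': name,
                                                'availability-zone': zone})
        if existing:
            return existing[0], False   # already present, changed=False
        volume = ec2.create_volume(size_gb, zone)
        ec2.create_tags([volume.id], {'Name': name})
        return volume, True             # created, changed=True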
- -import sys -import time - -from distutils.version import LooseVersion - -try: - import boto.ec2 -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def get_volume(module, ec2): - name = module.params.get('name') - id = module.params.get('id') - zone = module.params.get('zone') - filters = {} - volume_ids = None - if zone: - filters['availability_zone'] = zone - if name: - filters = {'tag:Name': name} - if id: - volume_ids = [id] - try: - vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters) - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - if not vols: - module.fail_json(msg="Could not find volume in zone (if specified): %s" % name or id) - if len(vols) > 1: - module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name) - return vols[0] - -def get_volumes(module, ec2): - instance = module.params.get('instance') - - if not instance: - module.fail_json(msg = "Instance must be specified to get volumes") - - try: - vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance}) - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - return vols - -def delete_volume(module, ec2): - vol = get_volume(module, ec2) - if not vol: - module.exit_json(changed=False) - else: - if vol.attachment_state() is not None: - adata = vol.attach_data - module.fail_json(msg="Volume %s is attached to an instance %s." % (vol.id, adata.instance_id)) - ec2.delete_volume(vol.id) - module.exit_json(changed=True) - -def boto_supports_volume_encryption(): - """ - Check if Boto library supports encryption of EBS volumes (added in 2.29.0) - - Returns: - True if boto library has the named param as an argument on the request_spot_instances method, else False - """ - return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0') - -def create_volume(module, ec2, zone): - name = module.params.get('name') - id = module.params.get('id') - instance = module.params.get('instance') - iops = module.params.get('iops') - encrypted = module.params.get('encrypted') - volume_size = module.params.get('volume_size') - snapshot = module.params.get('snapshot') - # If custom iops is defined we use volume_type "io1" rather than the default of "standard" - if iops: - volume_type = 'io1' - else: - volume_type = 'standard' - - # If no instance supplied, try volume creation based on module parameters. 
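One observation on the waiting code in this module: the status polls below have no upper bound. A bounded variant of the same pattern (a sketch assuming any boto object exposing status and update(), not the module's actual behaviour):

    import time

    def wait_for_status(resource, target, timeout=300, interval=3):
        # Poll a boto resource until it reaches `target`, failing after
        # `timeout` seconds instead of spinning forever.
        deadline = time.time() + timeout
        while resource.status != target:
            if time.time() > deadline:
                raise RuntimeError('timed out waiting for status %r' % target)
            time.sleep(interval)
            resource.update()  # refresh state from the EC2 API

create_volume could then call wait_for_status(volume, 'available') instead of its open-ended loop.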
- if name or id: - if not instance: - module.fail_json(msg = "If name or id is specified, instance must also be specified") - if iops or volume_size: - module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]") - - volume = get_volume(module, ec2) - if volume.attachment_state() is not None: - adata = volume.attach_data - if adata.instance_id != instance: - module.fail_json(msg = "Volume %s is already attached to another instance: %s" - % (name or id, adata.instance_id)) - else: - module.exit_json(msg="Volume %s is already mapped on instance %s: %s" % - (name or id, adata.instance_id, adata.device), - volume_id=id, - device=adata.device, - changed=False) - else: - try: - if boto_supports_volume_encryption(): - volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted) - else: - volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops) - - while volume.status != 'available': - time.sleep(3) - volume.update() - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - return volume - - -def attach_volume(module, ec2, volume, instance): - device_name = module.params.get('device_name') - - if device_name and instance: - try: - attach = volume.attach(instance.id, device_name) - while volume.attachment_state() != 'attached': - time.sleep(3) - volume.update() - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - # If device_name isn't set, make a choice based on best practices here: - # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html - - # In future this needs to be more dynamic but combining block device mapping best practices - # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. 
For me ;) - - # Use password data attribute to tell whether the instance is Windows or Linux - if device_name is None and instance: - try: - if not ec2.get_password_data(instance.id): - device_name = '/dev/sdf' - attach = volume.attach(instance.id, device_name) - while volume.attachment_state() != 'attached': - time.sleep(3) - volume.update() - else: - device_name = '/dev/xvdf' - attach = volume.attach(instance.id, device_name) - while volume.attachment_state() != 'attached': - time.sleep(3) - volume.update() - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - instance = dict(), - id = dict(), - name = dict(), - volume_size = dict(), - iops = dict(), - encrypted = dict(), - device_name = dict(), - zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), - snapshot = dict(), - state = dict(choices=['absent', 'present', 'list'], default='present') - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - id = module.params.get('id') - name = module.params.get('name') - instance = module.params.get('instance') - volume_size = module.params.get('volume_size') - iops = module.params.get('iops') - encrypted = module.params.get('encrypted') - device_name = module.params.get('device_name') - zone = module.params.get('zone') - snapshot = module.params.get('snapshot') - state = module.params.get('state') - - ec2 = ec2_connect(module) - - if state == 'list': - returned_volumes = [] - vols = get_volumes(module, ec2) - - for v in vols: - attachment = v.attach_data - - returned_volumes.append({ - 'create_time': v.create_time, - 'id': v.id, - 'iops': v.iops, - 'size': v.size, - 'snapshot_id': v.snapshot_id, - 'status': v.status, - 'type': v.type, - 'zone': v.zone, - 'attachment_set': { - 'attach_time': attachment.attach_time, - 'device': attachment.device, - 'status': attachment.status - } - }) - - module.exit_json(changed=False, volumes=returned_volumes) - - if id and name: - module.fail_json(msg="Both id and name cannot be specified") - - if encrypted and not boto_supports_volume_encryption(): - module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes") - - # Here we need to get the zone info for the instance. This covers situation where - # instance is specified but zone isn't. - # Useful for playbooks chaining instance launch with volume create + attach and where the - # zone doesn't matter to the user. - if instance: - reservation = ec2.get_all_instances(instance_ids=instance) - inst = reservation[0].instances[0] - zone = inst.placement - - # Check if there is a volume already mounted there. 
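The two platform branches in attach_volume above differ only in the device name chosen, so the attach-and-wait logic could be shared. A possible consolidation (a sketch; ec2, volume and instance as in the surrounding code):

    import time

    def attach_with_default_device(ec2, volume, instance):
        # get_password_data() returns a non-empty string only for Windows
        # instances, which is how the module distinguishes the platforms.
        if ec2.get_password_data(instance.id):
            device_name = '/dev/xvdf'   # Windows convention
        else:
            device_name = '/dev/sdf'    # Linux/UNIX convention
        volume.attach(instance.id, device_name)
        while volume.attachment_state() != 'attached':
            time.sleep(3)
            volume.update()
        return device_name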
- if device_name: - if device_name in inst.block_device_mapping: - module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance), - volume_id=inst.block_device_mapping[device_name].volume_id, - device=device_name, - changed=False) - - # Delaying the checks until after the instance check allows us to get volume ids for existing volumes - # without needing to pass an unused volume_size - if not volume_size and not (id or name): - module.fail_json(msg="You must specify an existing volume with id or name or a volume_size") - - if volume_size and (id or name): - module.fail_json(msg="Cannot specify volume_size and either one of name or id") - - - if state == 'absent': - delete_volume(module, ec2) - - if state == 'present': - volume = create_volume(module, ec2, zone) - if instance: - attach_volume(module, ec2, volume, inst) - module.exit_json(volume_id=volume.id, device=device_name) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc deleted file mode 100644 index 2f9840281c2..00000000000 --- a/library/cloud/ec2_vpc +++ /dev/null @@ -1,623 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ec2_vpc -short_description: configure AWS virtual private clouds -description: - - Creates or terminates AWS virtual private clouds. This module has a dependency on python-boto. -version_added: "1.4" -options: - cidr_block: - description: - - "The cidr block representing the VPC, e.g. 10.0.0.0/16" - required: false, unless state=present - instance_tenancy: - description: - - "The supported tenancy options for instances launched into the VPC." - required: false - default: "default" - choices: [ "default", "dedicated" ] - dns_support: - description: - - toggles the "Enable DNS resolution" flag - required: false - default: "yes" - choices: [ "yes", "no" ] - dns_hostnames: - description: - - toggles the "Enable DNS hostname support for instances" flag - required: false - default: "yes" - choices: [ "yes", "no" ] - subnets: - description: - - 'A dictionary array of subnets to add of the form: { cidr: ..., az: ... , resource_tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: resource_tags) are also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed. As of 1.8, if the subnets parameter is not specified, no existing subnets will be modified.' - required: false - default: null - aliases: [] - vpc_id: - description: - - A VPC id to terminate when state=absent - required: false - default: null - aliases: [] - resource_tags: - description: - - 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. 
Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if the CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.' - required: true - default: null - aliases: [] - version_added: "1.6" - internet_gateway: - description: - - Toggle whether there should be an Internet gateway attached to the VPC - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - route_tables: - description: - - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword igw for gw specifies that the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition to igw. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.' - required: false - default: null - aliases: [] - wait: - description: - - wait for the VPC to be in state 'available' before returning - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 - aliases: [] - state: - description: - - Create or terminate the VPC - required: true - default: present - aliases: [] - region: - description: - - region in which the resource exists. - required: false - default: null - aliases: ['aws_region', 'ec2_region'] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key' ] - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. - required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - -requirements: [ "boto" ] -author: Carson Gee -''' - -EXAMPLES = ''' -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. - -# Basic creation example: - local_action: - module: ec2_vpc - state: present - cidr_block: 172.23.0.0/16 - resource_tags: { "Environment":"Development" } - region: us-west-2 -# Full creation example with subnets and optional availability zones. -# The absence or presence of subnets deletes or creates them respectively. 
- local_action: - module: ec2_vpc - state: present - cidr_block: 172.22.0.0/16 - resource_tags: { "Environment":"Development" } - subnets: - - cidr: 172.22.1.0/24 - az: us-west-2c - resource_tags: { "Environment":"Dev", "Tier" : "Web" } - - cidr: 172.22.2.0/24 - az: us-west-2b - resource_tags: { "Environment":"Dev", "Tier" : "App" } - - cidr: 172.22.3.0/24 - az: us-west-2a - resource_tags: { "Environment":"Dev", "Tier" : "DB" } - internet_gateway: True - route_tables: - - subnets: - - 172.22.2.0/24 - - 172.22.3.0/24 - routes: - - dest: 0.0.0.0/0 - gw: igw - - subnets: - - 172.22.1.0/24 - routes: - - dest: 0.0.0.0/0 - gw: igw - region: us-west-2 - register: vpc - -# Removal of a VPC by id - local_action: - module: ec2_vpc - state: absent - vpc_id: vpc-aaaaaaa - region: us-west-2 -If you have added elements not managed by this module, e.g. instances, NATs, etc then -the delete will fail until those dependencies are removed. -''' - - -import sys -import time - -try: - import boto.ec2 - import boto.vpc - from boto.exception import EC2ResponseError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def get_vpc_info(vpc): - """ - Retrieves vpc information from an instance - ID and returns it as a dictionary - """ - - return({ - 'id': vpc.id, - 'cidr_block': vpc.cidr_block, - 'dhcp_options_id': vpc.dhcp_options_id, - 'region': vpc.region.name, - 'state': vpc.state, - }) - -def find_vpc(module, vpc_conn, vpc_id=None, cidr=None): - """ - Finds a VPC that matches a specific id or cidr + tags - - module : AnsibleModule object - vpc_conn: authenticated VPCConnection connection object - - Returns: - A VPC object that matches either an ID or CIDR and one or more tag values - """ - - if vpc_id == None and cidr == None: - module.fail_json( - msg='You must specify either a vpc_id or a cidr block + list of unique tags, aborting' - ) - - found_vpcs = [] - - resource_tags = module.params.get('resource_tags') - - # Check for existing VPC by cidr_block or id - if vpc_id is not None: - found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available',}) - - else: - previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'}) - - for vpc in previous_vpcs: - # Get all tags for each of the found VPCs - vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id})) - - # If the supplied list of ID Tags match a subset of the VPC Tags, we found our VPC - if resource_tags and set(resource_tags.items()).issubset(set(vpc_tags.items())): - found_vpcs.append(vpc) - - found_vpc = None - - if len(found_vpcs) == 1: - found_vpc = found_vpcs[0] - - if len(found_vpcs) > 1: - module.fail_json(msg='Found more than one vpc based on the supplied criteria, aborting') - - return (found_vpc) - -def create_vpc(module, vpc_conn): - """ - Creates a new or modifies an existing VPC. 
- - module : AnsibleModule object - vpc_conn: authenticated VPCConnection connection object - - Returns: - A dictionary with information - about the VPC and subnets that were launched - """ - - id = module.params.get('vpc_id') - cidr_block = module.params.get('cidr_block') - instance_tenancy = module.params.get('instance_tenancy') - dns_support = module.params.get('dns_support') - dns_hostnames = module.params.get('dns_hostnames') - subnets = module.params.get('subnets') - internet_gateway = module.params.get('internet_gateway') - route_tables = module.params.get('route_tables') - vpc_spec_tags = module.params.get('resource_tags') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - changed = False - - # Check for existing VPC by cidr_block + tags or id - previous_vpc = find_vpc(module, vpc_conn, id, cidr_block) - - if previous_vpc is not None: - changed = False - vpc = previous_vpc - else: - changed = True - try: - vpc = vpc_conn.create_vpc(cidr_block, instance_tenancy) - # wait here until the vpc is available - pending = True - wait_timeout = time.time() + wait_timeout - while wait and wait_timeout > time.time() and pending: - try: - pvpc = vpc_conn.get_all_vpcs(vpc.id) - if hasattr(pvpc, 'state'): - if pvpc.state == "available": - pending = False - elif hasattr(pvpc[0], 'state'): - if pvpc[0].state == "available": - pending = False - # sometimes vpc_conn.create_vpc() will return a vpc that can't be found yet by vpc_conn.get_all_vpcs() - # when that happens, just wait a bit longer and try again - except boto.exception.BotoServerError, e: - if e.error_code != 'InvalidVpcID.NotFound': - raise - if pending: - time.sleep(5) - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime()) - - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - # Done with base VPC, now change to attributes and features. - - # Add resource tags - vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id})) - - if not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())): - new_tags = {} - - for (key, value) in set(vpc_spec_tags.items()): - if (key, value) not in set(vpc_tags.items()): - new_tags[key] = value - - if new_tags: - vpc_conn.create_tags(vpc.id, new_tags) - - - # boto doesn't appear to have a way to determine the existing - # value of the dns attributes, so we just set them. - # It also must be done one at a time. - vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_support=dns_support) - vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_hostnames=dns_hostnames) - - - # Process all subnet properties - if subnets is not None: - if not isinstance(subnets, list): - module.fail_json(msg='subnets needs to be a list of cidr blocks') - - current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id }) - - # First add all new subnets - for subnet in subnets: - add_subnet = True - for csn in current_subnets: - if subnet['cidr'] == csn.cidr_block: - add_subnet = False - if add_subnet: - try: - new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None)) - new_subnet_tags = subnet.get('resource_tags', None) - if new_subnet_tags: - # Sometimes AWS takes its time to create a subnet and so using new subnets's id - # to create tags results in exception. 
- # boto doesn't seem to refresh 'state' of the newly created subnet, i.e.: it's always 'pending' - # so I resorted to polling vpc_conn.get_all_subnets with the id of the newly added subnet - while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0: - time.sleep(0.1) - - vpc_conn.create_tags(new_subnet.id, new_subnet_tags) - - changed = True - except EC2ResponseError, e: - module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e)) - - # Now delete all absent subnets - for csubnet in current_subnets: - delete_subnet = True - for subnet in subnets: - if csubnet.cidr_block == subnet['cidr']: - delete_subnet = False - if delete_subnet: - try: - vpc_conn.delete_subnet(csubnet.id) - changed = True - except EC2ResponseError, e: - module.fail_json(msg='Unable to delete subnet {0}, error: {1}'.format(csubnet.cidr_block, e)) - - # Handle Internet gateway (create/delete igw) - igw = None - igws = vpc_conn.get_all_internet_gateways(filters={'attachment.vpc-id': vpc.id}) - if len(igws) > 1: - module.fail_json(msg='EC2 returned more than one Internet Gateway for id %s, aborting' % vpc.id) - if internet_gateway: - if len(igws) != 1: - try: - igw = vpc_conn.create_internet_gateway() - vpc_conn.attach_internet_gateway(igw.id, vpc.id) - changed = True - except EC2ResponseError, e: - module.fail_json(msg='Unable to create Internet Gateway, error: {0}'.format(e)) - else: - # Set igw variable to the current igw instance for use in route tables. - igw = igws[0] - else: - if len(igws) > 0: - try: - vpc_conn.detach_internet_gateway(igws[0].id, vpc.id) - vpc_conn.delete_internet_gateway(igws[0].id) - changed = True - except EC2ResponseError, e: - module.fail_json(msg='Unable to delete Internet Gateway, error: {0}'.format(e)) - - # Handle route tables - this may be worth splitting into a - # different module but should work fine here. The strategy to stay - # idempotent is to basically build all the route tables as - # defined, track the route table ids, and then run through the - # remote list of route tables and delete any that we didn't - # create. This shouldn't interrupt traffic in theory, but is the - # only way to really work with route tables over time that I can - # think of without using painful aws ids. Hopefully boto will add - # the replace-route-table API to make this smoother and - # allow control of the 'main' routing table. 
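In outline, the build-then-prune strategy described in the comment above reduces to the following (a sketch of the idea with assumed names, not the exact code that follows):

    def prune_route_tables(vpc_conn, vpc_id, created_ids):
        # Keep everything created this run; delete any remote, non-main
        # route table we did not create, since boto cannot replace routes
        # in place or manage the main table.
        for rt in vpc_conn.get_all_route_tables(filters={'vpc-id': vpc_id}):
            if rt.id in created_ids:
                continue
            if any(assoc.main for assoc in rt.associations):
                continue    # leave the 'main' table alone
            vpc_conn.delete_route_table(rt.id)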
- if route_tables is not None: - if not isinstance(route_tables, list): - module.fail_json(msg='route tables need to be a list of dictionaries') - - # Work through each route table and update/create to match dictionary array - all_route_tables = [] - for rt in route_tables: - try: - new_rt = vpc_conn.create_route_table(vpc.id) - for route in rt['routes']: - route_kwargs = {} - if route['gw'] == 'igw': - if not internet_gateway: - module.fail_json( - msg='You asked for an Internet Gateway ' \ - '(igw) route, but you have no Internet Gateway' - ) - route_kwargs['gateway_id'] = igw.id - elif route['gw'].startswith('i-'): - route_kwargs['instance_id'] = route['gw'] - else: - route_kwargs['gateway_id'] = route['gw'] - vpc_conn.create_route(new_rt.id, route['dest'], **route_kwargs) - - # Associate with subnets - for sn in rt['subnets']: - rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id }) - if len(rsn) != 1: - module.fail_json( - msg='The subnet {0} to associate with route_table {1} ' \ - 'does not exist, aborting'.format(sn, rt) - ) - rsn = rsn[0] - - # Disassociate then associate since we don't have replace - old_rt = vpc_conn.get_all_route_tables( - filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id} - ) - if len(old_rt) == 1: - old_rt = old_rt[0] - association_id = None - for a in old_rt.associations: - if a.subnet_id == rsn.id: - association_id = a.id - vpc_conn.disassociate_route_table(association_id) - - vpc_conn.associate_route_table(new_rt.id, rsn.id) - - all_route_tables.append(new_rt) - changed = True - except EC2ResponseError, e: - module.fail_json( - msg='Unable to create and associate route table {0}, error: ' \ - '{1}'.format(rt, e) - ) - - # Now that we are good to go on our new route tables, delete the - # old ones except the 'main' route table as boto can't set the main - # table yet. - all_rts = vpc_conn.get_all_route_tables(filters={'vpc-id': vpc.id}) - for rt in all_rts: - delete_rt = True - for newrt in all_route_tables: - if newrt.id == rt.id: - delete_rt = False - break - if delete_rt: - rta = rt.associations - is_main = False - for a in rta: - if a.main: - is_main = True - break - try: - if not is_main: - vpc_conn.delete_route_table(rt.id) - changed = True - except EC2ResponseError, e: - module.fail_json(msg='Unable to delete old route table {0}, error: {1}'.format(rt.id, e)) - - vpc_dict = get_vpc_info(vpc) - created_vpc_id = vpc.id - returned_subnets = [] - current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id }) - - for sn in current_subnets: - returned_subnets.append({ - 'resource_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})), - 'cidr': sn.cidr_block, - 'az': sn.availability_zone, - 'id': sn.id, - }) - - return (vpc_dict, created_vpc_id, returned_subnets, changed) - -def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None): - """ - Terminates a VPC - - module: Ansible module object - vpc_conn: authenticated VPCConnection connection object - vpc_id: a vpc id to terminate - cidr: The cidr block of the VPC - can be used in lieu of an ID - - Returns a dictionary of VPC information - about the VPC terminated. - - If the VPC to be terminated is available - "changed" will be set to True. 
- - """ - vpc_dict = {} - terminated_vpc_id = '' - changed = False - - vpc = find_vpc(module, vpc_conn, vpc_id, cidr) - - if vpc is not None: - if vpc.state == 'available': - terminated_vpc_id=vpc.id - vpc_dict=get_vpc_info(vpc) - try: - subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id}) - for sn in subnets: - vpc_conn.delete_subnet(sn.id) - - igws = vpc_conn.get_all_internet_gateways( - filters={'attachment.vpc-id': vpc.id} - ) - for igw in igws: - vpc_conn.detach_internet_gateway(igw.id, vpc.id) - vpc_conn.delete_internet_gateway(igw.id) - - rts = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id}) - for rt in rts: - rta = rt.associations - is_main = False - for a in rta: - if a.main: - is_main = True - if not is_main: - vpc_conn.delete_route_table(rt.id) - - vpc_conn.delete_vpc(vpc.id) - except EC2ResponseError, e: - module.fail_json( - msg='Unable to delete VPC {0}, error: {1}'.format(vpc.id, e) - ) - changed = True - - return (changed, vpc_dict, terminated_vpc_id) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - cidr_block = dict(), - instance_tenancy = dict(choices=['default', 'dedicated'], default='default'), - wait = dict(type='bool', default=False), - wait_timeout = dict(default=300), - dns_support = dict(type='bool', default=True), - dns_hostnames = dict(type='bool', default=True), - subnets = dict(type='list'), - vpc_id = dict(), - internet_gateway = dict(type='bool', default=False), - resource_tags = dict(type='dict', required=True), - route_tables = dict(type='list'), - state = dict(choices=['present', 'absent'], default='present'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - state = module.params.get('state') - - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - # If we have a region specified, connect to its endpoint. - if region: - try: - vpc_conn = boto.vpc.connect_to_region( - region, - aws_access_key_id=aws_access_key, - aws_secret_access_key=aws_secret_key - ) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - else: - module.fail_json(msg="region must be specified") - - if module.params.get('state') == 'absent': - vpc_id = module.params.get('vpc_id') - cidr = module.params.get('cidr_block') - (changed, vpc_dict, new_vpc_id) = terminate_vpc(module, vpc_conn, vpc_id, cidr) - subnets_changed = None - elif module.params.get('state') == 'present': - # Changed is always set to true when provisioning a new VPC - (vpc_dict, new_vpc_id, subnets_changed, changed) = create_vpc(module, vpc_conn) - - module.exit_json(changed=changed, vpc_id=new_vpc_id, vpc=vpc_dict, subnets=subnets_changed) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/elasticache b/library/cloud/elasticache deleted file mode 100644 index 8c82f2fcc20..00000000000 --- a/library/cloud/elasticache +++ /dev/null @@ -1,547 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ ---- -module: elasticache -short_description: Manage cache clusters in Amazon Elasticache. -description: - - Manage cache clusters in Amazon Elasticache. - - Returns information about the specified cache cluster. -version_added: "1.4" -requirements: [ "boto" ] -author: Jim Dalton -options: - state: - description: - - C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster, resulting in a momentary outage. - choices: ['present', 'absent', 'rebooted'] - required: true - name: - description: - - The cache cluster identifier - required: true - engine: - description: - - Name of the cache engine to be used (memcached or redis) - required: false - default: memcached - cache_engine_version: - description: - - The version number of the cache engine - required: false - default: 1.4.14 - node_type: - description: - - The compute and memory capacity of the nodes in the cache cluster - required: false - default: cache.m1.small - num_nodes: - description: - - The initial number of cache nodes that the cache cluster will have - required: false - cache_port: - description: - - The port number on which each of the cache nodes will accept connections - required: false - default: 11211 - security_group_ids: - description: - - A list of vpc security group names to associate with this cache cluster. Only use if inside a vpc - required: false - default: ['default'] - version_added: "1.6" - cache_security_groups: - description: - - A list of cache security group names to associate with this cache cluster - required: false - default: ['default'] - zone: - description: - - The EC2 Availability Zone in which the cache cluster will be created - required: false - default: None - wait: - description: - - Wait for cache cluster result before returning - required: false - default: yes - choices: [ "yes", "no" ] - hard_modify: - description: - - Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state - required: false - default: no - choices: [ "yes", "no" ] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key'] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key'] - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - -""" - -EXAMPLES = """ -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. 
- -# Basic example -- local_action: - module: elasticache - name: "test-please-delete" - state: present - engine: memcached - cache_engine_version: 1.4.14 - node_type: cache.m1.small - num_nodes: 1 - cache_port: 11211 - cache_security_groups: - - default - zone: us-east-1d - - -# Ensure cache cluster is gone -- local_action: - module: elasticache - name: "test-please-delete" - state: absent - -# Reboot cache cluster -- local_action: - module: elasticache - name: "test-please-delete" - state: rebooted - -""" - -import sys -import os -import time - -try: - import boto - from boto.elasticache.layer1 import ElastiCacheConnection - from boto.regioninfo import RegionInfo -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -class ElastiCacheManager(object): - """Handles elasticache creation and destruction""" - - EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying'] - - def __init__(self, module, name, engine, cache_engine_version, node_type, - num_nodes, cache_port, cache_security_groups, security_group_ids, zone, wait, - hard_modify, aws_access_key, aws_secret_key, region): - self.module = module - self.name = name - self.engine = engine - self.cache_engine_version = cache_engine_version - self.node_type = node_type - self.num_nodes = num_nodes - self.cache_port = cache_port - self.cache_security_groups = cache_security_groups - self.security_group_ids = security_group_ids - self.zone = zone - self.wait = wait - self.hard_modify = hard_modify - - self.aws_access_key = aws_access_key - self.aws_secret_key = aws_secret_key - self.region = region - - self.changed = False - self.data = None - self.status = 'gone' - self.conn = self._get_elasticache_connection() - self._refresh_data() - - def ensure_present(self): - """Ensure cache cluster exists or create it if not""" - if self.exists(): - self.sync() - else: - self.create() - - def ensure_absent(self): - """Ensure cache cluster is gone or delete it if not""" - self.delete() - - def ensure_rebooted(self): - """Reboot the cache cluster""" - self.reboot() - - def exists(self): - """Check if cache cluster exists""" - return self.status in self.EXIST_STATUSES - - def create(self): - """Create an ElastiCache cluster""" - if self.status == 'available': - return - if self.status in ['creating', 'rebooting', 'modifying']: - if self.wait: - self._wait_for_status('available') - return - if self.status == 'deleting': - if self.wait: - self._wait_for_status('gone') - else: - msg = "'%s' is currently deleting. Cannot create." 
- self.module.fail_json(msg=msg % self.name) - - try: - response = self.conn.create_cache_cluster(cache_cluster_id=self.name, - num_cache_nodes=self.num_nodes, - cache_node_type=self.node_type, - engine=self.engine, - engine_version=self.cache_engine_version, - cache_security_group_names=self.cache_security_groups, - security_group_ids=self.security_group_ids, - preferred_availability_zone=self.zone, - port=self.cache_port) - except boto.exception.BotoServerError, e: - self.module.fail_json(msg=e.message) - cache_cluster_data = response['CreateCacheClusterResponse']['CreateCacheClusterResult']['CacheCluster'] - self._refresh_data(cache_cluster_data) - - self.changed = True - if self.wait: - self._wait_for_status('available') - return True - - def delete(self): - """Destroy an ElastiCache cluster""" - if self.status == 'gone': - return - if self.status == 'deleting': - if self.wait: - self._wait_for_status('gone') - return - if self.status in ['creating', 'rebooting', 'modifying']: - if self.wait: - self._wait_for_status('available') - else: - msg = "'%s' is currently %s. Cannot delete." - self.module.fail_json(msg=msg % (self.name, self.status)) - - try: - response = self.conn.delete_cache_cluster(cache_cluster_id=self.name) - except boto.exception.BotoServerError, e: - self.module.fail_json(msg=e.message) - cache_cluster_data = response['DeleteCacheClusterResponse']['DeleteCacheClusterResult']['CacheCluster'] - self._refresh_data(cache_cluster_data) - - self.changed = True - if self.wait: - self._wait_for_status('gone') - - def sync(self): - """Sync settings to cluster if required""" - if not self.exists(): - msg = "'%s' is %s. Cannot sync." - self.module.fail_json(msg=msg % (self.name, self.status)) - - if self.status in ['creating', 'rebooting', 'modifying']: - if self.wait: - self._wait_for_status('available') - else: - # Cluster can only be synced if available. If we can't wait - # for this, then just be done. - return - - if self._requires_destroy_and_create(): - if not self.hard_modify: - msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed." - self.module.fail_json(msg=msg % self.name) - if not self.wait: - msg = "'%s' requires destructive modification. 'wait' must be set to true." - self.module.fail_json(msg=msg % self.name) - self.delete() - self.create() - return - - if self._requires_modification(): - self.modify() - - def modify(self): - """Modify the cache cluster. Note it's only possible to modify a few select options.""" - nodes_to_remove = self._get_nodes_to_remove() - try: - response = self.conn.modify_cache_cluster(cache_cluster_id=self.name, - num_cache_nodes=self.num_nodes, - cache_node_ids_to_remove=nodes_to_remove, - cache_security_group_names=self.cache_security_groups, - security_group_ids=self.security_group_ids, - apply_immediately=True, - engine_version=self.cache_engine_version) - except boto.exception.BotoServerError, e: - self.module.fail_json(msg=e.message) - - cache_cluster_data = response['ModifyCacheClusterResponse']['ModifyCacheClusterResult']['CacheCluster'] - self._refresh_data(cache_cluster_data) - - self.changed = True - if self.wait: - self._wait_for_status('available') - - def reboot(self): - """Reboot the cache cluster""" - if not self.exists(): - msg = "'%s' is %s. Cannot reboot." 
- self.module.fail_json(msg=msg % (self.name, self.status)) - if self.status == 'rebooting': - return - if self.status in ['creating', 'modifying']: - if self.wait: - self._wait_for_status('available') - else: - msg = "'%s' is currently %s. Cannot reboot." - self.module.fail_json(msg=msg % (self.name, self.status)) - - # Collect ALL nodes for reboot - cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']] - try: - response = self.conn.reboot_cache_cluster(cache_cluster_id=self.name, - cache_node_ids_to_reboot=cache_node_ids) - except boto.exception.BotoServerError, e: - self.module.fail_json(msg=e.message) - - cache_cluster_data = response['RebootCacheClusterResponse']['RebootCacheClusterResult']['CacheCluster'] - self._refresh_data(cache_cluster_data) - - self.changed = True - if self.wait: - self._wait_for_status('available') - - def get_info(self): - """Return basic info about the cache cluster""" - info = { - 'name': self.name, - 'status': self.status - } - if self.data: - info['data'] = self.data - return info - - - def _wait_for_status(self, awaited_status): - """Wait for status to change from present status to awaited_status""" - status_map = { - 'creating': 'available', - 'rebooting': 'available', - 'modifying': 'available', - 'deleting': 'gone' - } - - if status_map[self.status] != awaited_status: - msg = "Invalid awaited status. '%s' cannot transition to '%s'" - self.module.fail_json(msg=msg % (self.status, awaited_status)) - - if awaited_status not in set(status_map.values()): - msg = "'%s' is not a valid awaited status." - self.module.fail_json(msg=msg % awaited_status) - - while True: - time.sleep(1) - self._refresh_data() - if self.status == awaited_status: - break - - def _requires_modification(self): - """Check if cluster requires (nondestructive) modification""" - # Check modifiable data attributes - modifiable_data = { - 'NumCacheNodes': self.num_nodes, - 'EngineVersion': self.cache_engine_version - } - for key, value in modifiable_data.iteritems(): - if self.data[key] != value: - return True - - # Check cache security groups - cache_security_groups = [] - for sg in self.data['CacheSecurityGroups']: - cache_security_groups.append(sg['CacheSecurityGroupName']) - if set(cache_security_groups) - set(self.cache_security_groups): - return True - - # check vpc security groups - vpc_security_groups = [] - security_groups = self.data['SecurityGroups'] or [] - for sg in security_groups: - vpc_security_groups.append(sg['SecurityGroupId']) - if set(vpc_security_groups) - set(self.security_group_ids): - return True - - return False - - def _requires_destroy_and_create(self): - """ - Check whether a destroy and create is required to synchronize cluster. 
- """ - unmodifiable_data = { - 'node_type': self.data['CacheNodeType'], - 'engine': self.data['Engine'], - 'cache_port': self._get_port() - } - # Only check for modifications if zone is specified - if self.zone is not None: - unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone'] - for key, value in unmodifiable_data.iteritems(): - if getattr(self, key) != value: - return True - return False - - def _get_elasticache_connection(self): - """Get an elasticache connection""" - try: - endpoint = "elasticache.%s.amazonaws.com" % self.region - connect_region = RegionInfo(name=self.region, endpoint=endpoint) - return ElastiCacheConnection(aws_access_key_id=self.aws_access_key, - aws_secret_access_key=self.aws_secret_key, - region=connect_region) - except boto.exception.NoAuthHandlerFound, e: - self.module.fail_json(msg=e.message) - - def _get_port(self): - """Get the port. Where this information is retrieved from is engine dependent.""" - if self.data['Engine'] == 'memcached': - return self.data['ConfigurationEndpoint']['Port'] - elif self.data['Engine'] == 'redis': - # Redis only supports a single node (presently) so just use - # the first and only - return self.data['CacheNodes'][0]['Endpoint']['Port'] - - def _refresh_data(self, cache_cluster_data=None): - """Refresh data about this cache cluster""" - if cache_cluster_data is None: - try: - response = self.conn.describe_cache_clusters(cache_cluster_id=self.name, - show_cache_node_info=True) - except boto.exception.BotoServerError: - self.data = None - self.status = 'gone' - return - cache_cluster_data = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'][0] - self.data = cache_cluster_data - self.status = self.data['CacheClusterStatus'] - - # The documentation for elasticache lies -- status on rebooting is set - # to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it - # here to make status checks etc. more sane. - if self.status == 'rebooting cache cluster nodes': - self.status = 'rebooting' - - def _get_nodes_to_remove(self): - """If there are nodes to remove, it figures out which need to be removed""" - num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes - if num_nodes_to_remove <= 0: - return None - - if not self.hard_modify: - msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed." 
- self.module.fail_json(msg=msg % self.name) - - cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']] - return cache_node_ids[-num_nodes_to_remove:] - - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - state={'required': True, 'choices': ['present', 'absent', 'rebooted']}, - name={'required': True}, - engine={'required': False, 'default': 'memcached'}, - cache_engine_version={'required': False, 'default': '1.4.14'}, - node_type={'required': False, 'default': 'cache.m1.small'}, - num_nodes={'required': False, 'default': None, 'type': 'int'}, - cache_port={'required': False, 'default': 11211, 'type': 'int'}, - cache_security_groups={'required': False, 'default': ['default'], - 'type': 'list'}, - security_group_ids={'required': False, 'default': [], - 'type': 'list'}, - zone={'required': False, 'default': None}, - wait={'required': False, 'type' : 'bool', 'default': True}, - hard_modify={'required': False, 'type': 'bool', 'default': False} - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - name = module.params['name'] - state = module.params['state'] - engine = module.params['engine'] - cache_engine_version = module.params['cache_engine_version'] - node_type = module.params['node_type'] - num_nodes = module.params['num_nodes'] - cache_port = module.params['cache_port'] - cache_security_groups = module.params['cache_security_groups'] - security_group_ids = module.params['security_group_ids'] - zone = module.params['zone'] - wait = module.params['wait'] - hard_modify = module.params['hard_modify'] - - if state == 'present' and not num_nodes: - module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0") - - if not region: - module.fail_json(msg=str("Either region or EC2_REGION environment variable must be set.")) - - elasticache_manager = ElastiCacheManager(module, name, engine, - cache_engine_version, node_type, - num_nodes, cache_port, - cache_security_groups, - security_group_ids, zone, wait, - hard_modify, aws_access_key, - aws_secret_key, region) - - if state == 'present': - elasticache_manager.ensure_present() - elif state == 'absent': - elasticache_manager.ensure_absent() - elif state == 'rebooted': - elasticache_manager.ensure_rebooted() - - facts_result = dict(changed=elasticache_manager.changed, - elasticache=elasticache_manager.get_info()) - - module.exit_json(**facts_result) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/gc_storage b/library/cloud/gc_storage deleted file mode 100644 index 1963a148da2..00000000000 --- a/library/cloud/gc_storage +++ /dev/null @@ -1,420 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: gc_storage -version_added: "1.4" -short_description: This module manages objects/buckets in Google Cloud Storage. -description: - - This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for information about setting the default project. - -options: - bucket: - description: - - Bucket name. - required: true - default: null - aliases: [] - object: - description: - - Keyname of the object inside the bucket. Can also be used to create "virtual directories" (see examples). - required: false - default: null - aliases: [] - src: - description: - - The source file path when performing a PUT operation. - required: false - default: null - aliases: [] - dest: - description: - - The destination file path when downloading an object/key with a GET operation. - required: false - aliases: [] - force: - description: - - Forces an overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. - required: false - default: true - aliases: [ 'overwrite' ] - permission: - description: - - This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'authenticated-read'. - required: false - default: private - expiration: - description: - - Time limit (in seconds) for the URL generated and returned by GCS when performing a mode=put or mode=get_url operation. This URL is only available when public-read is the acl for the object. - required: false - default: null - aliases: [] - mode: - description: - - Switches the module behaviour between upload, download, get_url (return download url), get_str (download object as string), create (bucket) and delete (bucket). - required: true - default: null - aliases: [] - choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ] - gcs_secret_key: - description: - - GCS secret key. If not set then the value of the GCS_SECRET_KEY environment variable is used. - required: true - default: null - gcs_access_key: - description: - - GCS access key. If not set then the value of the GCS_ACCESS_KEY environment variable is used. - required: true - default: null - -requirements: [ "boto 2.9+" ] - -author: benno@ansible.com. Note: most of the code has been taken from the S3 module. 
- -''' - -EXAMPLES = ''' -# upload some content -- gc_storage: bucket=mybucket object=key.txt src=/usr/local/myfile.txt mode=put permission=public-read - -# download some content -- gc_storage: bucket=mybucket object=key.txt dest=/usr/local/myfile.txt mode=get - -# Download an object as a string to use elsewhere in your playbook -- gc_storage: bucket=mybucket object=key.txt mode=get_str - -# Create an empty bucket -- gc_storage: bucket=mybucket mode=create - -# Create a bucket with key as directory -- gc_storage: bucket=mybucket object=/my/directory/path mode=create - -# Delete a bucket and all contents -- gc_storage: bucket=mybucket mode=delete -''' - -import sys -import os -import urlparse -import hashlib - -try: - import boto -except ImportError: - print "failed=True msg='boto 2.9+ required for this module'" - sys.exit(1) - -def grant_check(module, gs, obj): - try: - acp = obj.get_acl() - if module.params.get('permission') == 'public-read': - grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllUsers'] - if not grant: - obj.set_acl('public-read') - module.exit_json(changed=True, result="The object's permission has been set to public-read") - if module.params.get('permission') == 'authenticated-read': - grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers'] - if not grant: - obj.set_acl('authenticated-read') - module.exit_json(changed=True, result="The object's permission has been set to authenticated-read") - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - return True - - - -def key_check(module, gs, bucket, obj): - try: - bucket = gs.lookup(bucket) - key_check = bucket.get_key(obj) - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if key_check: - grant_check(module, gs, key_check) - return True - else: - return False - -def keysum(module, gs, bucket, obj): - bucket = gs.lookup(bucket) - key_check = bucket.get_key(obj) - if not key_check: - return None - md5_remote = key_check.etag[1:-1] - etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5 - if etag_multipart is True: - module.fail_json(msg="Files uploaded with multipart to gs are not supported with checksum; unable to compute checksum.") - return md5_remote - -def bucket_check(module, gs, bucket): - try: - result = gs.lookup(bucket) - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if result: - grant_check(module, gs, result) - return True - else: - return False - -def create_bucket(module, gs, bucket): - try: - bucket = gs.create_bucket(bucket) - bucket.set_acl(module.params.get('permission')) - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if bucket: - return True - -def delete_bucket(module, gs, bucket): - try: - bucket = gs.lookup(bucket) - bucket_contents = bucket.list() - for key in bucket_contents: - bucket.delete_key(key.name) - bucket.delete() - return True - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def delete_key(module, gs, bucket, obj): - try: - bucket = gs.lookup(bucket) - bucket.delete_key(obj) - module.exit_json(msg="Object deleted from bucket", changed=True) - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def create_dirkey(module, gs, bucket, obj): - try: - bucket = gs.lookup(bucket) - key = bucket.new_key(obj) - key.set_contents_from_string('') - module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True) -
except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def upload_file_check(src): - if os.path.exists(src): - file_exists = True - else: - file_exists = False - if os.path.isdir(src): - module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True) - return file_exists - -def path_check(path): - if os.path.exists(path): - return True - else: - return False - -def upload_gsfile(module, gs, bucket, obj, src, expiry): - try: - bucket = gs.lookup(bucket) - key = bucket.new_key(obj) - key.set_contents_from_filename(src) - key.set_acl(module.params.get('permission')) - url = key.generate_url(expiry) - module.exit_json(msg="PUT operation complete", url=url, changed=True) - except gs.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def download_gsfile(module, gs, bucket, obj, dest): - try: - bucket = gs.lookup(bucket) - key = bucket.lookup(obj) - key.get_contents_to_filename(dest) - module.exit_json(msg="GET operation complete", changed=True) - except gs.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def download_gsstr(module, gs, bucket, obj): - try: - bucket = gs.lookup(bucket) - key = bucket.lookup(obj) - contents = key.get_contents_as_string() - module.exit_json(msg="GET operation complete", contents=contents, changed=True) - except gs.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def get_download_url(module, gs, bucket, obj, expiry): - try: - bucket = gs.lookup(bucket) - key = bucket.lookup(obj) - url = key.generate_url(expiry) - module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True) - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def handle_get(module, gs, bucket, obj, overwrite, dest): - md5_remote = keysum(module, gs, bucket, obj) - md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest() - if md5_local == md5_remote: - module.exit_json(changed=False) - if md5_local != md5_remote and not overwrite: - module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True) - else: - download_gsfile(module, gs, bucket, obj, dest) - -def handle_put(module, gs, bucket, obj, overwrite, src, expiration): - # Let's check to see if the bucket exists to get ground truth. - bucket_rc = bucket_check(module, gs, bucket) - key_rc = key_check(module, gs, bucket, obj) - - # Let's check the key's state. If it exists, compute the etag md5sum. - if bucket_rc and key_rc: - md5_remote = keysum(module, gs, bucket, obj) - md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest() - if md5_local == md5_remote: - module.exit_json(msg="Local and remote object are identical", changed=False) - if md5_local != md5_remote and not overwrite: - module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True) - else: - upload_gsfile(module, gs, bucket, obj, src, expiration) - - if not bucket_rc: - create_bucket(module, gs, bucket) - upload_gsfile(module, gs, bucket, obj, src, expiration) - - # If bucket exists but key doesn't, just upload.
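- # (Note: the overwrite flag is only consulted when both the bucket and the
- # key already exist; a missing bucket or key always results in an upload.)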
- if bucket_rc and not key_rc: - upload_gsfile(module, gs, bucket, obj, src, expiration) - -def handle_delete(module, gs, bucket, obj): - if bucket and not obj: - if bucket_check(module, gs, bucket): - module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=delete_bucket(module, gs, bucket)) - else: - module.exit_json(msg="Bucket does not exist.", changed=False) - if bucket and obj: - if bucket_check(module, gs, bucket): - if key_check(module, gs, bucket, obj): - module.exit_json(msg="Object has been deleted.", changed=delete_key(module, gs, bucket, obj)) - else: - module.exit_json(msg="Object does not exist.", changed=False) - else: - module.exit_json(msg="Bucket does not exist.", changed=False) - else: - module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True) - -def handle_create(module, gs, bucket, obj): - if bucket and not obj: - if bucket_check(module, gs, bucket): - module.exit_json(msg="Bucket already exists.", changed=False) - else: - module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket)) - if bucket and obj: - if obj.endswith('/'): - dirobj = obj - else: - dirobj = obj + "/" - if bucket_check(module, gs, bucket): - if key_check(module, gs, bucket, dirobj): - module.exit_json(msg="Bucket %s and key %s already exist." % (bucket, obj), changed=False) - else: - create_dirkey(module, gs, bucket, dirobj) - else: - create_bucket(module, gs, bucket) - create_dirkey(module, gs, bucket, dirobj) - -def main(): - module = AnsibleModule( - argument_spec = dict( - bucket = dict(required=True), - object = dict(default=None), - src = dict(default=None), - dest = dict(default=None), - expiration = dict(default=600, aliases=['expiry']), - mode = dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True), - permission = dict(choices=['private', 'public-read', 'authenticated-read'], default='private'), - gs_secret_key = dict(no_log=True, required=True), - gs_access_key = dict(required=True), - overwrite = dict(default=True, type='bool', aliases=['force']), - ), - ) - - bucket = module.params.get('bucket') - obj = module.params.get('object') - src = module.params.get('src') - dest = module.params.get('dest') - if dest: - dest = os.path.expanduser(dest) - mode = module.params.get('mode') - expiry = module.params.get('expiration') - gs_secret_key = module.params.get('gs_secret_key') - gs_access_key = module.params.get('gs_access_key') - overwrite = module.params.get('overwrite') - - if mode == 'put': - if not src or not obj: - module.fail_json(msg="When using PUT, src, bucket, object are mandatory parameters") - if mode == 'get': - if not dest or not obj: - module.fail_json(msg="When using GET, dest, bucket, object are mandatory parameters") - if obj: - obj = os.path.expanduser(module.params['object']) - - try: - gs = boto.connect_gs(gs_access_key, gs_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - - if mode == 'get': - if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj): - module.fail_json(msg="Target bucket/key cannot be found", failed=True) - if not path_check(dest): - download_gsfile(module, gs, bucket, obj, dest) - else: - handle_get(module, gs, bucket, obj, overwrite, dest) - - if mode == 'put': - if not path_check(src): - module.fail_json(msg="Local object for PUT does not exist", failed=True) - handle_put(module, gs, bucket, obj, overwrite, src, expiry) - - # Support for deleting an object if
we have both params. - if mode == 'delete': - handle_delete(module, gs, bucket, obj) - - if mode == 'create': - handle_create(module, gs, bucket, obj) - - if mode == 'get_url': - if bucket and obj: - if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj): - get_download_url(module, gs, bucket, obj, expiry) - else: - module.fail_json(msg="Key/Bucket does not exist", failed=True) - else: - module.fail_json(msg="Bucket and Object parameters must be set", failed=True) - - # --------------------------- Get the String contents of an Object ------------------------- - if mode == 'get_str': - if bucket and obj: - if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj): - download_gsstr(module, gs, bucket, obj) - else: - module.fail_json(msg="Key/Bucket does not exist", failed=True) - else: - module.fail_json(msg="Bucket and Object parameters must be set", failed=True) - - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/cloud/gce b/library/cloud/gce deleted file mode 100755 index d429b61de20..00000000000 --- a/library/cloud/gce +++ /dev/null @@ -1,474 +0,0 @@ -#!/usr/bin/python -# Copyright 2013 Google Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: gce -version_added: "1.4" -short_description: create or terminate GCE instances -description: - - Creates or terminates Google Compute Engine (GCE) instances. See - U(https://cloud.google.com/products/compute-engine) for an overview. - Full install/configuration instructions for the gce* modules can - be found in the comments of ansible/test/gce_tests.py. 
-options: - image: - description: - - image string to use for the instance - required: false - default: "debian-7" - aliases: [] - instance_names: - description: - - a comma-separated list of instance names to create or destroy - required: false - default: null - aliases: [] - machine_type: - description: - - machine type to use for the instance, use 'n1-standard-1' by default - required: false - default: "n1-standard-1" - aliases: [] - metadata: - description: - - a hash/dictionary of custom data for the instance; '{"key":"value",...}' - required: false - default: null - aliases: [] - service_account_email: - version_added: 1.5.1 - description: - - service account email - required: false - default: null - aliases: [] - pem_file: - version_added: 1.5.1 - description: - - path to the pem file associated with the service account email - required: false - default: null - aliases: [] - project_id: - version_added: 1.5.1 - description: - - your GCE project ID - required: false - default: null - aliases: [] - name: - description: - - identifier when working with a single instance - required: false - aliases: [] - network: - description: - - name of the network, 'default' will be used if not specified - required: false - default: "default" - aliases: [] - persistent_boot_disk: - description: - - if set, create the instance with a persistent boot disk - required: false - default: "false" - aliases: [] - disks: - description: - - a list of persistent disks to attach to the instance; a string value gives the name of the disk; alternatively, a dictionary value can define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry will be the boot disk (which must be READ_WRITE). - required: false - default: null - aliases: [] - version_added: "1.7" - state: - description: - - desired state of the resource - required: false - default: "present" - choices: ["active", "present", "absent", "deleted"] - aliases: [] - tags: - description: - - a comma-separated list of tags to associate with the instance - required: false - default: null - aliases: [] - zone: - description: - - the GCE zone to use - required: true - default: "us-central1-a" - aliases: [] - -requirements: [ "libcloud" ] -notes: - - Either I(name) or I(instance_names) is required. -author: Eric Johnson -''' - -EXAMPLES = ''' -# Basic provisioning example. Create a single Debian 7 instance in the -# us-central1-a Zone of n1-standard-1 machine type. 
-- local_action: - module: gce - name: test-instance - zone: us-central1-a - machine_type: n1-standard-1 - image: debian-7 - -# Example using defaults and with metadata to create a single 'foo' instance -- local_action: - module: gce - name: foo - metadata: '{"db":"postgres", "group":"qa", "id":500}' - - -# Launch instances from a control node, runs some tasks on the new instances, -# and then terminate them -- name: Create a sandbox instance - hosts: localhost - vars: - names: foo,bar - machine_type: n1-standard-1 - image: debian-6 - zone: us-central1-a - service_account_email: unique-email@developer.gserviceaccount.com - pem_file: /path/to/pem_file - project_id: project-id - tasks: - - name: Launch instances - local_action: gce instance_names={{names}} machine_type={{machine_type}} - image={{image}} zone={{zone}} service_account_email={{ service_account_email }} - pem_file={{ pem_file }} project_id={{ project_id }} - register: gce - - name: Wait for SSH to come up - local_action: wait_for host={{item.public_ip}} port=22 delay=10 - timeout=60 state=started - with_items: {{gce.instance_data}} - -- name: Configure instance(s) - hosts: launched - sudo: True - roles: - - my_awesome_role - - my_awesome_tasks - -- name: Terminate instances - hosts: localhost - connection: local - tasks: - - name: Terminate instances that were previously launched - local_action: - module: gce - state: 'absent' - instance_names: {{gce.instance_names}} - -''' - -import sys - -try: - from libcloud.compute.types import Provider - from libcloud.compute.providers import get_driver - from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ - ResourceExistsError, ResourceInUseError, ResourceNotFoundError - _ = Provider.GCE -except ImportError: - print("failed=True " + \ - "msg='libcloud with GCE support (0.13.3+) required for this module'") - sys.exit(1) - -try: - from ast import literal_eval -except ImportError: - print("failed=True " + \ - "msg='GCE module requires python's 'ast' module, python v2.6+'") - sys.exit(1) - - -def get_instance_info(inst): - """Retrieves instance information from an instance object and returns it - as a dictionary. - - """ - metadata = {} - if 'metadata' in inst.extra and 'items' in inst.extra['metadata']: - for md in inst.extra['metadata']['items']: - metadata[md['key']] = md['value'] - - try: - netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] - except: - netname = None - if 'disks' in inst.extra: - disk_names = [disk_info['source'].split('/')[-1] - for disk_info - in sorted(inst.extra['disks'], - key=lambda disk_info: disk_info['index'])] - else: - disk_names = [] - return({ - 'image': not inst.image is None and inst.image.split('/')[-1] or None, - 'disks': disk_names, - 'machine_type': inst.size, - 'metadata': metadata, - 'name': inst.name, - 'network': netname, - 'private_ip': inst.private_ips[0], - 'public_ip': inst.public_ips[0], - 'status': ('status' in inst.extra) and inst.extra['status'] or None, - 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [], - 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None, - }) - -def create_instances(module, gce, instance_names): - """Creates new instances. Attributes other than instance_names are picked - up from 'module' - - module : AnsibleModule object - gce: authenticated GCE libcloud driver - instance_names: python list of instance names to create - - Returns: - A list of dictionaries with instance information - about the instances that were launched. 
- - """ - image = module.params.get('image') - machine_type = module.params.get('machine_type') - metadata = module.params.get('metadata') - network = module.params.get('network') - persistent_boot_disk = module.params.get('persistent_boot_disk') - disks = module.params.get('disks') - state = module.params.get('state') - tags = module.params.get('tags') - zone = module.params.get('zone') - - new_instances = [] - changed = False - - lc_image = gce.ex_get_image(image) - lc_disks = [] - disk_modes = [] - for i, disk in enumerate(disks or []): - if isinstance(disk, dict): - lc_disks.append(gce.ex_get_volume(disk['name'])) - disk_modes.append(disk['mode']) - else: - lc_disks.append(gce.ex_get_volume(disk)) - # boot disk is implicitly READ_WRITE - disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE') - lc_network = gce.ex_get_network(network) - lc_machine_type = gce.ex_get_size(machine_type) - lc_zone = gce.ex_get_zone(zone) - - # Try to convert the user's metadata value into the format expected - # by GCE. First try to ensure user has proper quoting of a - # dictionary-like syntax using 'literal_eval', then convert the python - # dict into a python list of 'key' / 'value' dicts. Should end up - # with: - # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...] - if metadata: - try: - md = literal_eval(metadata) - if not isinstance(md, dict): - raise ValueError('metadata must be a dict') - except ValueError, e: - print("failed=True msg='bad metadata: %s'" % str(e)) - sys.exit(1) - except SyntaxError, e: - print("failed=True msg='bad metadata syntax'") - sys.exit(1) - - items = [] - for k,v in md.items(): - items.append({"key": k,"value": v}) - metadata = {'items': items} - - # These variables all have default values but check just in case - if not lc_image or not lc_network or not lc_machine_type or not lc_zone: - module.fail_json(msg='Missing required create instance variable', - changed=False) - - for name in instance_names: - pd = None - if lc_disks: - pd = lc_disks[0] - elif persistent_boot_disk: - try: - pd = gce.create_volume(None, "%s" % name, image=lc_image) - except ResourceExistsError: - pd = gce.ex_get_volume("%s" % name, lc_zone) - inst = None - try: - inst = gce.create_node(name, lc_machine_type, lc_image, - location=lc_zone, ex_network=network, ex_tags=tags, - ex_metadata=metadata, ex_boot_disk=pd) - changed = True - except ResourceExistsError: - inst = gce.ex_get_node(name, lc_zone) - except GoogleBaseError, e: - module.fail_json(msg='Unexpected error attempting to create ' + \ - 'instance %s, error: %s' % (name, e.value)) - - for i, lc_disk in enumerate(lc_disks): - # Check whether the disk is already attached - if (len(inst.extra['disks']) > i): - attached_disk = inst.extra['disks'][i] - if attached_disk['source'] != lc_disk.extra['selfLink']: - module.fail_json( - msg=("Disk at index %d does not match: requested=%s found=%s" % ( - i, lc_disk.extra['selfLink'], attached_disk['source']))) - elif attached_disk['mode'] != disk_modes[i]: - module.fail_json( - msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % ( - i, disk_modes[i], attached_disk['mode']))) - else: - continue - gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i]) - # Work around libcloud bug: attached volumes don't get added - # to the instance metadata. get_instance_info() only cares about - # source and index. 
- if len(inst.extra['disks']) != i+1: - inst.extra['disks'].append( - {'source': lc_disk.extra['selfLink'], 'index': i}) - - if inst: - new_instances.append(inst) - - instance_names = [] - instance_json_data = [] - for inst in new_instances: - d = get_instance_info(inst) - instance_names.append(d['name']) - instance_json_data.append(d) - - return (changed, instance_json_data, instance_names) - - -def terminate_instances(module, gce, instance_names, zone_name): - """Terminates a list of instances. - - module: Ansible module object - gce: authenticated GCE connection object - instance_names: a list of instance names to terminate - zone_name: the zone where the instances reside prior to termination - - Returns a dictionary of instance names that were terminated. - - """ - changed = False - terminated_instance_names = [] - for name in instance_names: - inst = None - try: - inst = gce.ex_get_node(name, zone_name) - except ResourceNotFoundError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - if inst: - gce.destroy_node(inst) - terminated_instance_names.append(inst.name) - changed = True - - return (changed, terminated_instance_names) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - image = dict(default='debian-7'), - instance_names = dict(), - machine_type = dict(default='n1-standard-1'), - metadata = dict(), - name = dict(), - network = dict(default='default'), - persistent_boot_disk = dict(type='bool', default=False), - disks = dict(type='list'), - state = dict(choices=['active', 'present', 'absent', 'deleted'], - default='present'), - tags = dict(type='list'), - zone = dict(default='us-central1-a'), - service_account_email = dict(), - pem_file = dict(), - project_id = dict(), - ) - ) - - gce = gce_connect(module) - - image = module.params.get('image') - instance_names = module.params.get('instance_names') - machine_type = module.params.get('machine_type') - metadata = module.params.get('metadata') - name = module.params.get('name') - network = module.params.get('network') - persistent_boot_disk = module.params.get('persistent_boot_disk') - state = module.params.get('state') - tags = module.params.get('tags') - zone = module.params.get('zone') - changed = False - - inames = [] - if isinstance(instance_names, list): - inames = instance_names - elif isinstance(instance_names, str): - inames = instance_names.split(',') - if name: - inames.append(name) - if not inames: - module.fail_json(msg='Must specify a "name" or "instance_names"', - changed=False) - if not zone: - module.fail_json(msg='Must specify a "zone"', changed=False) - - json_output = {'zone': zone} - if state in ['absent', 'deleted']: - json_output['state'] = 'absent' - (changed, terminated_instance_names) = terminate_instances(module, - gce, inames, zone) - - # based on what user specified, return the same variable, although - # value could be different if an instance could not be destroyed - if instance_names: - json_output['instance_names'] = terminated_instance_names - elif name: - json_output['name'] = name - - elif state in ['active', 'present']: - json_output['state'] = 'present' - (changed, instance_data,instance_name_list) = create_instances( - module, gce, inames) - json_output['instance_data'] = instance_data - if instance_names: - json_output['instance_names'] = instance_name_list - elif name: - json_output['name'] = name - - - json_output['changed'] = changed - print json.dumps(json_output) - sys.exit(0) - -# import module snippets -from 
ansible.module_utils.basic import * -from ansible.module_utils.gce import * - -main() diff --git a/library/cloud/gce_lb b/library/cloud/gce_lb deleted file mode 100644 index a60e14010cf..00000000000 --- a/library/cloud/gce_lb +++ /dev/null @@ -1,335 +0,0 @@ -#!/usr/bin/python -# Copyright 2013 Google Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: gce_lb -version_added: "1.5" -short_description: create/destroy GCE load-balancer resources -description: - - This module can create and destroy Google Compute Engine C(loadbalancer) - and C(httphealthcheck) resources. The primary LB resource is the - C(load_balancer) resource and the health check parameters are all - prefixed with I(httphealthcheck). - The full documentation for Google Compute Engine load balancing is at - U(https://developers.google.com/compute/docs/load-balancing/). However, - the ansible module simplifies the configuration by following the - libcloud model. - Full install/configuration instructions for the gce* modules can - be found in the comments of ansible/test/gce_tests.py. -options: - httphealthcheck_name: - description: - - the name identifier for the HTTP health check - required: false - default: null - httphealthcheck_port: - description: - - the TCP port to use for HTTP health checking - required: false - default: 80 - httphealthcheck_path: - description: - - the url path to use for HTTP health checking - required: false - default: "/" - httphealthcheck_interval: - description: - - the duration in seconds between each health check request - required: false - default: 5 - httphealthcheck_timeout: - description: - - the timeout in seconds before a request is considered a failed check - required: false - default: 5 - httphealthcheck_unhealthy_count: - description: - - number of consecutive failed checks before marking a node unhealthy - required: false - default: 2 - httphealthcheck_healthy_count: - description: - - number of consecutive successful checks before marking a node healthy - required: false - default: 2 - httphealthcheck_host: - description: - - host header to pass through on HTTP check requests - required: false - default: null - name: - description: - - name of the load-balancer resource - required: false - default: null - protocol: - description: - - the protocol used for the load-balancer packet forwarding, tcp or udp - required: false - default: "tcp" - choices: ['tcp', 'udp'] - region: - description: - - the GCE region where the load-balancer is defined - required: false - external_ip: - description: - - the external static IPv4 (or auto-assigned) address for the LB - required: false - default: null - port_range: - description: - - the port (range) to forward, e.g. 80 or 8000-8888 defaults to all ports - required: false - default: null - members: - description: - - a list of zone/nodename pairs, e.g ['us-central1-a/www-a', ...] 
- required: false - aliases: ['nodes'] - state: - description: - - desired state of the LB - default: "present" - choices: ["active", "present", "absent", "deleted"] - aliases: [] - service_account_email: - version_added: "1.6" - description: - - service account email - required: false - default: null - aliases: [] - pem_file: - version_added: "1.6" - description: - - path to the pem file associated with the service account email - required: false - default: null - aliases: [] - project_id: - version_added: "1.6" - description: - - your GCE project ID - required: false - default: null - aliases: [] - -requirements: [ "libcloud" ] -author: Eric Johnson -''' - -EXAMPLES = ''' -# Simple example of creating a new LB, adding members, and a health check -- local_action: - module: gce_lb - name: testlb - region: us-central1 - members: ["us-central1-a/www-a", "us-central1-b/www-b"] - httphealthcheck_name: hc - httphealthcheck_port: 80 - httphealthcheck_path: "/up" -''' - -import sys - - -try: - from libcloud.compute.types import Provider - from libcloud.compute.providers import get_driver - from libcloud.loadbalancer.types import Provider as Provider_lb - from libcloud.loadbalancer.providers import get_driver as get_driver_lb - from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ - ResourceExistsError, ResourceNotFoundError - _ = Provider.GCE -except ImportError: - print("failed=True " + \ - "msg='libcloud with GCE support required for this module.'") - sys.exit(1) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - httphealthcheck_name = dict(), - httphealthcheck_port = dict(default=80), - httphealthcheck_path = dict(default='/'), - httphealthcheck_interval = dict(default=5), - httphealthcheck_timeout = dict(default=5), - httphealthcheck_unhealthy_count = dict(default=2), - httphealthcheck_healthy_count = dict(default=2), - httphealthcheck_host = dict(), - name = dict(), - protocol = dict(default='tcp'), - region = dict(), - external_ip = dict(), - port_range = dict(), - members = dict(type='list'), - state = dict(default='present'), - service_account_email = dict(), - pem_file = dict(), - project_id = dict(), - ) - ) - - gce = gce_connect(module) - - httphealthcheck_name = module.params.get('httphealthcheck_name') - httphealthcheck_port = module.params.get('httphealthcheck_port') - httphealthcheck_path = module.params.get('httphealthcheck_path') - httphealthcheck_interval = module.params.get('httphealthcheck_interval') - httphealthcheck_timeout = module.params.get('httphealthcheck_timeout') - httphealthcheck_unhealthy_count = \ - module.params.get('httphealthcheck_unhealthy_count') - httphealthcheck_healthy_count = \ - module.params.get('httphealthcheck_healthy_count') - httphealthcheck_host = module.params.get('httphealthcheck_host') - name = module.params.get('name') - protocol = module.params.get('protocol') - region = module.params.get('region') - external_ip = module.params.get('external_ip') - port_range = module.params.get('port_range') - members = module.params.get('members') - state = module.params.get('state') - - try: - gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce) - gcelb.connection.user_agent_append("%s/%s" % ( - USER_AGENT_PRODUCT, USER_AGENT_VERSION)) - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - changed = False - json_output = {'name': name, 'state': state} - - if not name and not httphealthcheck_name: - module.fail_json(msg='Nothing to do, please specify a "name" ' + \ - 'or 
"httphealthcheck_name" parameter', changed=False) - - if state in ['active', 'present']: - # first, create the httphealthcheck if requested - hc = None - if httphealthcheck_name: - json_output['httphealthcheck_name'] = httphealthcheck_name - try: - hc = gcelb.ex_create_healthcheck(httphealthcheck_name, - host=httphealthcheck_host, path=httphealthcheck_path, - port=httphealthcheck_port, - interval=httphealthcheck_interval, - timeout=httphealthcheck_timeout, - unhealthy_threshold=httphealthcheck_unhealthy_count, - healthy_threshold=httphealthcheck_healthy_count) - changed = True - except ResourceExistsError: - hc = gce.ex_get_healthcheck(httphealthcheck_name) - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - if hc is not None: - json_output['httphealthcheck_host'] = hc.extra['host'] - json_output['httphealthcheck_path'] = hc.path - json_output['httphealthcheck_port'] = hc.port - json_output['httphealthcheck_interval'] = hc.interval - json_output['httphealthcheck_timeout'] = hc.timeout - json_output['httphealthcheck_unhealthy_count'] = \ - hc.unhealthy_threshold - json_output['httphealthcheck_healthy_count'] = \ - hc.healthy_threshold - - # create the forwarding rule (and target pool under the hood) - lb = None - if name: - if not region: - module.fail_json(msg='Missing required region name', - changed=False) - nodes = [] - output_nodes = [] - json_output['name'] = name - # members is a python list of 'zone/inst' strings - if members: - for node in members: - try: - zone, node_name = node.split('/') - nodes.append(gce.ex_get_node(node_name, zone)) - output_nodes.append(node) - except: - # skip nodes that are badly formatted or don't exist - pass - try: - if hc is not None: - lb = gcelb.create_balancer(name, port_range, protocol, - None, nodes, ex_region=region, ex_healthchecks=[hc], - ex_address=external_ip) - else: - lb = gcelb.create_balancer(name, port_range, protocol, - None, nodes, ex_region=region, ex_address=external_ip) - changed = True - except ResourceExistsError: - lb = gcelb.get_balancer(name) - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - if lb is not None: - json_output['members'] = output_nodes - json_output['protocol'] = protocol - json_output['region'] = region - json_output['external_ip'] = lb.ip - json_output['port_range'] = lb.port - hc_names = [] - if 'healthchecks' in lb.extra: - for hc in lb.extra['healthchecks']: - hc_names.append(hc.name) - json_output['httphealthchecks'] = hc_names - - if state in ['absent', 'deleted']: - # first, delete the load balancer (forwarding rule and target pool) - # if specified. 
- if name: - json_output['name'] = name - try: - lb = gcelb.get_balancer(name) - gcelb.destroy_balancer(lb) - changed = True - except ResourceNotFoundError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - # destroy the health check if specified - if httphealthcheck_name: - json_output['httphealthcheck_name'] = httphealthcheck_name - try: - hc = gce.ex_get_healthcheck(httphealthcheck_name) - gce.ex_destroy_healthcheck(hc) - changed = True - except ResourceNotFoundError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - - json_output['changed'] = changed - print json.dumps(json_output) - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.gce import * - -main() diff --git a/library/cloud/gce_net b/library/cloud/gce_net deleted file mode 100644 index c2c0b30452d..00000000000 --- a/library/cloud/gce_net +++ /dev/null @@ -1,271 +0,0 @@ -#!/usr/bin/python -# Copyright 2013 Google Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: gce_net -version_added: "1.5" -short_description: create/destroy GCE networks and firewall rules -description: - - This module can create and destroy Google Compute Engine networks and - firewall rules U(https://developers.google.com/compute/docs/networking). - The I(name) parameter is reserved for referencing a network while the - I(fwname) parameter is used to reference firewall rules. - IPv4 address ranges must be specified using the CIDR - U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format. - Full install/configuration instructions for the gce* modules can - be found in the comments of ansible/test/gce_tests.py.
-options: - allowed: - description: - - the protocol:ports to allow ('tcp:80' or 'tcp:80,443' or 'tcp:80-800') - required: false - default: null - aliases: [] - ipv4_range: - description: - - the IPv4 address range in CIDR notation for the network - required: false - aliases: ['cidr'] - fwname: - description: - - name of the firewall rule - required: false - default: null - aliases: ['fwrule'] - name: - description: - - name of the network - required: false - default: null - aliases: [] - src_range: - description: - - the source IPv4 address range in CIDR notation - required: false - default: null - aliases: ['src_cidr'] - src_tags: - description: - - the source instance tags for creating a firewall rule - required: false - default: null - aliases: [] - state: - description: - - desired state of the network or firewall rule - required: false - default: "present" - choices: ["active", "present", "absent", "deleted"] - aliases: [] - service_account_email: - version_added: "1.6" - description: - - service account email - required: false - default: null - aliases: [] - pem_file: - version_added: "1.6" - description: - - path to the pem file associated with the service account email - required: false - default: null - aliases: [] - project_id: - version_added: "1.6" - description: - - your GCE project ID - required: false - default: null - aliases: [] - -requirements: [ "libcloud" ] -author: Eric Johnson -''' - -EXAMPLES = ''' -# Simple example of creating a new network -- local_action: - module: gce_net - name: privatenet - ipv4_range: '10.240.16.0/24' - -# Simple example of creating a new firewall rule -- local_action: - module: gce_net - name: privatenet - allowed: tcp:80,8080 - src_tags: ["web", "proxy"] - -''' - -import sys - -try: - from libcloud.compute.types import Provider - from libcloud.compute.providers import get_driver - from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ - ResourceExistsError, ResourceNotFoundError - _ = Provider.GCE -except ImportError: - print("failed=True " + \ - "msg='libcloud with GCE support required for this module.'") - sys.exit(1) - - -def format_allowed(allowed): - """Format the 'allowed' value so that it is GCE compatible. - For example, 'tcp:80,443' becomes [{"IPProtocol": "tcp", "ports": ["80", "443"]}]. - """ - if allowed.count(":") == 0: - protocol = allowed - ports = [] - elif allowed.count(":") == 1: - protocol, ports = allowed.split(":") - else: - return [] - # 'ports' is a comma-separated string at this point; an empty value means - # the protocol carries no port specification (e.g. 'icmp'). - if ports: - if ports.count(","): - ports = ports.split(",") - else: - ports = [ports] - return_val = {"IPProtocol": protocol} - if ports: - return_val["ports"] = ports - return [return_val] - - -def main(): - module = AnsibleModule( - argument_spec = dict( - allowed = dict(), - ipv4_range = dict(), - fwname = dict(), - name = dict(), - src_range = dict(), - src_tags = dict(type='list'), - state = dict(default='present'), - service_account_email = dict(), - pem_file = dict(), - project_id = dict(), - ) - ) - - gce = gce_connect(module) - - allowed = module.params.get('allowed') - ipv4_range = module.params.get('ipv4_range') - fwname = module.params.get('fwname') - name = module.params.get('name') - src_range = module.params.get('src_range') - src_tags = module.params.get('src_tags') - state = module.params.get('state') - - changed = False - json_output = {'state': state} - - if state in ['active', 'present']: - network = None - try: - network = gce.ex_get_network(name) - json_output['name'] = name - json_output['ipv4_range'] = network.cidr - except ResourceNotFoundError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - # user
wants to create a new network that doesn't yet exist - if name and not network: - if not ipv4_range: - module.fail_json(msg="Missing required 'ipv4_range' parameter", - changed=False) - - try: - network = gce.ex_create_network(name, ipv4_range) - json_output['name'] = name - json_output['ipv4_range'] = ipv4_range - changed = True - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - if fwname: - # user creating a firewall rule - if not allowed and not src_range and not src_tags: - if changed and network: - module.fail_json( - msg="Network created, but missing required " + \ - "firewall rule parameter(s)", changed=True) - module.fail_json( - msg="Missing required firewall rule parameter(s)", - changed=False) - - allowed_list = format_allowed(allowed) - - try: - gce.ex_create_firewall(fwname, allowed_list, network=name, - source_ranges=src_range, source_tags=src_tags) - changed = True - except ResourceExistsError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - json_output['fwname'] = fwname - json_output['allowed'] = allowed - json_output['src_range'] = src_range - json_output['src_tags'] = src_tags - - if state in ['absent', 'deleted']: - if fwname: - json_output['fwname'] = fwname - fw = None - try: - fw = gce.ex_get_firewall(fwname) - except ResourceNotFoundError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - if fw: - gce.ex_destroy_firewall(fw) - changed = True - if name: - json_output['name'] = name - network = None - try: - network = gce.ex_get_network(name) - except ResourceNotFoundError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - if network: - gce.ex_destroy_network(network) - changed = True - - json_output['changed'] = changed - print json.dumps(json_output) - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.gce import * - -main() diff --git a/library/cloud/gce_pd b/library/cloud/gce_pd deleted file mode 100644 index ddfe711304e..00000000000 --- a/library/cloud/gce_pd +++ /dev/null @@ -1,285 +0,0 @@ -#!/usr/bin/python -# Copyright 2013 Google Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: gce_pd -version_added: "1.4" -short_description: utilize GCE persistent disk resources -description: - - This module can create and destroy unformatted GCE persistent disks - U(https://developers.google.com/compute/docs/disks#persistentdisks). - It also supports attaching and detaching disks from running instances.
- Full install/configuration instructions for the gce* modules can - be found in the comments of ansible/test/gce_tests.py. -options: - detach_only: - description: - - do not destroy the disk, merely detach it from an instance - required: false - default: "no" - choices: ["yes", "no"] - aliases: [] - instance_name: - description: - - instance name if you wish to attach or detach the disk - required: false - default: null - aliases: [] - mode: - description: - - GCE mount mode of disk, READ_ONLY (default) or READ_WRITE - required: false - default: "READ_ONLY" - choices: ["READ_WRITE", "READ_ONLY"] - aliases: [] - name: - description: - - name of the disk - required: true - default: null - aliases: [] - size_gb: - description: - - whole integer size of disk (in GB) to create, default is 10 GB - required: false - default: 10 - aliases: [] - image: - description: - - the source image to use for the disk - required: false - default: null - aliases: [] - version_added: "1.7" - snapshot: - description: - - the source snapshot to use for the disk - required: false - default: null - aliases: [] - version_added: "1.7" - state: - description: - - desired state of the persistent disk - required: false - default: "present" - choices: ["active", "present", "absent", "deleted"] - aliases: [] - zone: - description: - - zone in which to create the disk - required: false - default: "us-central1-b" - aliases: [] - service_account_email: - version_added: "1.6" - description: - - service account email - required: false - default: null - aliases: [] - pem_file: - version_added: "1.6" - description: - - path to the pem file associated with the service account email - required: false - default: null - aliases: [] - project_id: - version_added: "1.6" - description: - - your GCE project ID - required: false - default: null - aliases: [] - -requirements: [ "libcloud" ] -author: Eric Johnson -''' - -EXAMPLES = ''' -# Simple attachment action to an existing instance -- local_action: - module: gce_pd - instance_name: notlocalhost - size_gb: 5 - name: pd -''' - -import sys - -try: - from libcloud.compute.types import Provider - from libcloud.compute.providers import get_driver - from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ - ResourceExistsError, ResourceNotFoundError, ResourceInUseError - _ = Provider.GCE -except ImportError: - print("failed=True " + \ - "msg='libcloud with GCE support is required for this module.'") - sys.exit(1) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - detach_only = dict(type='bool'), - instance_name = dict(), - mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']), - name = dict(required=True), - size_gb = dict(default=10), - image = dict(), - snapshot = dict(), - state = dict(default='present'), - zone = dict(default='us-central1-b'), - service_account_email = dict(), - pem_file = dict(), - project_id = dict(), - ) - ) - - gce = gce_connect(module) - - detach_only = module.params.get('detach_only') - instance_name = module.params.get('instance_name') - mode = module.params.get('mode') - name = module.params.get('name') - size_gb = module.params.get('size_gb') - image = module.params.get('image') - snapshot = module.params.get('snapshot') - state = module.params.get('state') - zone = module.params.get('zone') - - if detach_only and not instance_name: - module.fail_json( - msg='Must specify an instance name when detaching a disk', - changed=False) - - disk = inst = None - changed = is_attached = False - - json_output = { 'name': 
name, 'zone': zone, 'state': state } - if detach_only: - json_output['detach_only'] = True - json_output['detached_from_instance'] = instance_name - - if instance_name: - # user wants to attach/detach from an existing instance - try: - inst = gce.ex_get_node(instance_name, zone) - # is the disk attached? - for d in inst.extra['disks']: - if d['deviceName'] == name: - is_attached = True - json_output['attached_mode'] = d['mode'] - json_output['attached_to_instance'] = inst.name - except: - pass - - # find disk if it already exists - try: - disk = gce.ex_get_volume(name) - json_output['size_gb'] = int(disk.size) - except ResourceNotFoundError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - # user wants a disk to exist. If "instance_name" is supplied the user - # also wants it attached - if state in ['active', 'present']: - - if not size_gb: - module.fail_json(msg="Must supply a size_gb", changed=False) - try: - size_gb = int(round(float(size_gb))) - if size_gb < 1: - raise Exception - except: - module.fail_json(msg="Must supply a size_gb larger than 1 GB", - changed=False) - - if instance_name and inst is None: - module.fail_json(msg='Instance %s does not exist in zone %s' % ( - instance_name, zone), changed=False) - - if not disk: - if image is not None and snapshot is not None: - module.fail_json( - msg='Cannot give both image (%s) and snapshot (%s)' % ( - image, snapshot), changed=False) - lc_image = None - lc_snapshot = None - if image is not None: - lc_image = gce.ex_get_image(image) - elif snapshot is not None: - lc_snapshot = gce.ex_get_snapshot(snapshot) - try: - disk = gce.create_volume( - size_gb, name, location=zone, image=lc_image, - snapshot=lc_snapshot) - except ResourceExistsError: - pass - except QuotaExceededError: - module.fail_json(msg='Requested disk size exceeds quota', - changed=False) - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - json_output['size_gb'] = size_gb - if image is not None: - json_output['image'] = image - if snapshot is not None: - json_output['snapshot'] = snapshot - changed = True - if inst and not is_attached: - try: - gce.attach_volume(inst, disk, device=name, ex_mode=mode) - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - json_output['attached_to_instance'] = inst.name - json_output['attached_mode'] = mode - changed = True - - # user wants to delete a disk (or perhaps just detach it). 
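- # (With detach_only=yes the disk is detached from the instance but kept;
- # otherwise it is destroyed after any required detach.)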
- if state in ['absent', 'deleted'] and disk: - - if inst and is_attached: - try: - gce.detach_volume(disk, ex_node=inst) - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - changed = True - if not detach_only: - try: - gce.destroy_volume(disk) - except ResourceInUseError, e: - module.fail_json(msg=str(e.value), changed=False) - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - changed = True - - json_output['changed'] = changed - print json.dumps(json_output) - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.gce import * - -main() diff --git a/library/cloud/glance_image b/library/cloud/glance_image deleted file mode 100644 index d8b02602feb..00000000000 --- a/library/cloud/glance_image +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -DOCUMENTATION = ''' ---- -module: glance_image -version_added: "1.2" -short_description: Add/Delete images from glance -description: - - Add or Remove images from the glance repository. -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - auth_url: - description: - - The keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - name: - description: - - Name that has to be given to the image - required: true - default: None - disk_format: - description: - - The format of the disk that is getting uploaded - required: false - default: qcow2 - container_format: - description: - - The format of the container - required: false - default: bare - owner: - description: - - The owner of the image - required: false - default: None - min_disk: - description: - - The minimum disk space required to deploy this image - required: false - default: None - min_ram: - description: - - The minimum ram required to deploy this image - required: false - default: None - is_public: - description: - - Whether the image can be accessed publicly - required: false - default: 'yes' - copy_from: - description: - - A url from where the image can be downloaded, mutually exclusive with file parameter - required: false - default: None - timeout: - description: - - The time to wait for the image process to complete in seconds - required: false - default: 180 - file: - description: - - The path to the file which has to be uploaded, mutually exclusive with copy_from - required: false - default: None 
- endpoint_type: - description: - - endpoint URL type - choices: [publicURL, internalURL] - required: false - default: publicURL -requirements: ["glanceclient", "keystoneclient"] - -''' - -EXAMPLES = ''' -# Upload an image from an HTTP URL -- glance_image: login_username=admin - login_password=passme - login_tenant_name=admin - name=cirros - container_format=bare - disk_format=qcow2 - state=present - copy_from=http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img -''' - -import time -try: - import glanceclient - from keystoneclient.v2_0 import client as ksclient -except ImportError: - print("failed=True msg='glanceclient and keystone client are required'") - - -def _get_ksclient(module, kwargs): - try: - client = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg="Error authenticating to keystone: %s" % e.message) - return client - - -def _get_endpoint(module, client, endpoint_type): - try: - endpoint = client.service_catalog.url_for(service_type='image', endpoint_type=endpoint_type) - except Exception, e: - module.fail_json(msg="Error getting endpoint for glance: %s" % e.message) - return endpoint - - -def _get_glance_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient, kwargs.get('endpoint_type')) - kwargs = { - 'token': token, - } - try: - client = glanceclient.Client('1', endpoint, **kwargs) - except Exception, e: - module.fail_json(msg="Error in connecting to glance: %s" % e.message) - return client - - -def _glance_image_present(module, params, client): - try: - for image in client.images.list(): - if image.name == params['name']: - return image.id - return None - except Exception, e: - module.fail_json(msg="Error in fetching image list: %s" % e.message) - - -def _glance_image_create(module, params, client): - kwargs = { - 'name': params.get('name'), - 'disk_format': params.get('disk_format'), - 'container_format': params.get('container_format'), - 'owner': params.get('owner'), - 'is_public': params.get('is_public'), - 'copy_from': params.get('copy_from'), - } - try: - timeout = float(params.get('timeout')) - expire = time.time() + timeout - image = client.images.create(**kwargs) - if not params['copy_from']: - image.update(data=open(params['file'], 'rb')) - while time.time() < expire: - image = client.images.get(image.id) - if image.status == 'active': - break - time.sleep(5) - except Exception, e: - module.fail_json(msg="Error in creating image: %s" % e.message) - if image.status == 'active': - module.exit_json(changed=True, result=image.status, id=image.id) - else: - module.fail_json(msg="The module timed out; please check the image status manually: " + image.status) - - -def _glance_delete_image(module, params, client): - try: - for image in client.images.list(): - if image.name == params['name']: - client.images.delete(image) - except Exception, e: - module.fail_json(msg="Error in deleting image: %s" % e.message) - module.exit_json(changed=True, result="Deleted") - - -def main(): - - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - name = dict(required=True), - disk_format = dict(default='qcow2', choices=['aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']), - container_format = dict(default='bare', choices=['aki', 'ari', 'bare', 'ovf']), - owner = dict(default=None), - min_disk =
dict(default=None), - min_ram = dict(default=None), - is_public = dict(default=True), - copy_from = dict(default=None), - timeout = dict(default=180), - file = dict(default=None), - endpoint_type = dict(default='publicURL', choices=['publicURL', 'internalURL']), - state = dict(default='present', choices=['absent', 'present']) - )) - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive = [['file','copy_from']], - ) - if module.params['state'] == 'present': - if not module.params['file'] and not module.params['copy_from']: - module.fail_json(msg="Either file or copy_from variable should be set to create the image") - client = _get_glance_client(module, module.params) - id = _glance_image_present(module, module.params, client) - if not id: - _glance_image_create(module, module.params, client) - module.exit_json(changed=False, id=id, result="success") - - if module.params['state'] == 'absent': - client = _get_glance_client(module, module.params) - id = _glance_image_present(module, module.params, client) - if not id: - module.exit_json(changed=False, result="Success") - else: - _glance_delete_image(module, module.params, client) - -# this is magic, see lib/ansible/module_common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() diff --git a/library/cloud/keystone_user b/library/cloud/keystone_user deleted file mode 100644 index 5b412ca8008..00000000000 --- a/library/cloud/keystone_user +++ /dev/null @@ -1,394 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Based on Jimmy Tang's implementation - -DOCUMENTATION = ''' ---- -module: keystone_user -version_added: "1.2" -short_description: Manage OpenStack Identity (keystone) users, tenants and roles -description: - - Manage users, tenants and roles from OpenStack.
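# A minimal standalone sketch of the auth -> endpoint -> upload flow that the
# glance_image module above implements. It assumes the same v1-era
# glanceclient/keystoneclient APIs the module imports; the credentials,
# auth_url and image URL below are placeholders, not values from the module.
import time

import glanceclient
from keystoneclient.v2_0 import client as ksclient

ks = ksclient.Client(username='admin', password='passme',
                     tenant_name='admin',
                     auth_url='http://127.0.0.1:35357/v2.0/')
endpoint = ks.service_catalog.url_for(service_type='image',
                                      endpoint_type='publicURL')
glance = glanceclient.Client('1', endpoint, token=ks.auth_token)

# Mirror _glance_image_create: start the import, then poll until 'active'.
image = glance.images.create(name='cirros', disk_format='qcow2',
                             container_format='bare',
                             copy_from='http://example.com/cirros.img')
expire = time.time() + 180
while time.time() < expire and glance.images.get(image.id).status != 'active':
    time.sleep(5)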
-options: - login_user: - description: - login username to authenticate to keystone - required: false - default: admin - login_password: - description: - Password of login user - required: false - default: 'yes' - login_tenant_name: - description: - The tenant login_user belongs to - required: false - default: None - version_added: "1.3" - token: - description: - The token to be used in case the password is not specified - required: false - default: None - endpoint: - description: - The keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - user: - description: - The name of the user that has to be added/removed from OpenStack - required: false - default: None - password: - description: - The password to be assigned to the user - required: false - default: None - tenant: - description: - The tenant name that has to be added/removed - required: false - default: None - tenant_description: - description: - A description for the tenant - required: false - default: None - email: - description: - An email address for the user - required: false - default: None - role: - description: - The name of the role to be assigned or created - required: false - default: None - state: - description: - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -requirements: [ python-keystoneclient ] -author: Lorin Hochstein -''' - -EXAMPLES = ''' -# Create a tenant -- keystone_user: tenant=demo tenant_description="Default Tenant" - -# Create a user -- keystone_user: user=john tenant=demo password=secrete - -# Apply the admin role to the john user in the demo tenant -- keystone_user: role=admin user=john tenant=demo -''' - -try: - from keystoneclient.v2_0 import client -except ImportError: - keystoneclient_found = False -else: - keystoneclient_found = True - - -def authenticate(endpoint, token, login_user, login_password, login_tenant_name): - """Return a keystone client object""" - - if token: - return client.Client(endpoint=endpoint, token=token) - else: - return client.Client(auth_url=endpoint, username=login_user, - password=login_password, tenant_name=login_tenant_name) - - -def tenant_exists(keystone, tenant): - """ Return True if tenant already exists""" - return tenant in [x.name for x in keystone.tenants.list()] - - -def user_exists(keystone, user): - """ Return True if user already exists""" - return user in [x.name for x in keystone.users.list()] - - -def get_tenant(keystone, name): - """ Retrieve a tenant by name""" - tenants = [x for x in keystone.tenants.list() if x.name == name] - count = len(tenants) - if count == 0: - raise KeyError("No keystone tenants with name %s" % name) - elif count > 1: - raise ValueError("%d tenants with name %s" % (count, name)) - else: - return tenants[0] - - -def get_user(keystone, name): - """ Retrieve a user by name""" - users = [x for x in keystone.users.list() if x.name == name] - count = len(users) - if count == 0: - raise KeyError("No keystone users with name %s" % name) - elif count > 1: - raise ValueError("%d users with name %s" % (count, name)) - else: - return users[0] - - -def get_role(keystone, name): - """ Retrieve a role by name""" - roles = [x for x in keystone.roles.list() if x.name == name] - count = len(roles) - if count == 0: - raise KeyError("No keystone roles with name %s" % name) - elif count > 1: - raise ValueError("%d roles with name %s" % (count, name)) - else: - return roles[0] - - -def get_tenant_id(keystone, name): - return get_tenant(keystone,
name).id - - -def get_user_id(keystone, name): - return get_user(keystone, name).id - - -def ensure_tenant_exists(keystone, tenant_name, tenant_description, - check_mode): - """ Ensure that a tenant exists. - - Return (True, id) if the tenant was created or updated, (False, id) if it - already existed with the same description. - """ - - # Check if tenant already exists - try: - tenant = get_tenant(keystone, tenant_name) - except KeyError: - # Tenant doesn't exist yet - pass - else: - if tenant.description == tenant_description: - return (False, tenant.id) - else: - # We need to update the tenant description - if check_mode: - return (True, tenant.id) - else: - tenant.update(description=tenant_description) - return (True, tenant.id) - - # We now know we will have to create a new tenant - if check_mode: - return (True, None) - - ks_tenant = keystone.tenants.create(tenant_name=tenant_name, - description=tenant_description, - enabled=True) - return (True, ks_tenant.id) - - -def ensure_tenant_absent(keystone, tenant, check_mode): - """ Ensure that a tenant does not exist - - Return True if the tenant was removed, False if it didn't exist - in the first place - """ - if not tenant_exists(keystone, tenant): - return False - - # We now know we will have to delete the tenant - if check_mode: - return True - - keystone.tenants.delete(get_tenant(keystone, tenant)) - return True - - -def ensure_user_exists(keystone, user_name, password, email, tenant_name, - check_mode): - """ Check if user exists - - Return (True, id) if a new user was created, (False, id) if the user - already exists - """ - - # Check if user already exists - try: - user = get_user(keystone, user_name) - except KeyError: - # User doesn't exist yet - pass - else: - # User does exist, we're done - return (False, user.id) - - # We now know we will have to create a new user - if check_mode: - return (True, None) - - tenant = get_tenant(keystone, tenant_name) - - user = keystone.users.create(name=user_name, password=password, - email=email, tenant_id=tenant.id) - return (True, user.id) - - -def ensure_role_exists(keystone, user_name, tenant_name, role_name, - check_mode): - """ Check if role exists - - Return (True, id) if a new role was created or if the role was newly - assigned to the user for the tenant. (False, id) if the role already - exists and was already assigned to the user for the tenant.
- - """ - # Check if the user has the role in the tenant - user = get_user(keystone, user_name) - tenant = get_tenant(keystone, tenant_name) - roles = [x for x in keystone.roles.roles_for_user(user, tenant) - if x.name == role_name] - count = len(roles) - - if count == 1: - # If the role is in there, we are done - role = roles[0] - return (False, role.id) - elif count > 1: - # Too many roles with the same name, throw an error - raise ValueError("%d roles with name %s" % (count, role_name)) - - # At this point, we know we will need to make changes - if check_mode: - return (True, None) - - # Get the role if it exists - try: - role = get_role(keystone, role_name) - except KeyError: - # Role doesn't exist yet - role = keystone.roles.create(role_name) - - # Associate the role with the user in the admin - keystone.roles.add_user_role(user, role, tenant) - return (True, role.id) - - -def ensure_user_absent(keystone, user, check_mode): - raise NotImplementedError("Not yet implemented") - - -def ensure_role_absent(keystone, uesr, tenant, role, check_mode): - raise NotImplementedError("Not yet implemented") - - -def main(): - - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - tenant_description=dict(required=False), - email=dict(required=False), - role=dict(required=False), - state=dict(default='present', choices=['present', 'absent']), - endpoint=dict(required=False, - default="http://127.0.0.1:35357/v2.0"), - token=dict(required=False), - login_user=dict(required=False), - login_password=dict(required=False), - login_tenant_name=dict(required=False) - )) - # keystone operations themselves take an endpoint, not a keystone auth_url - del(argument_spec['auth_url']) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[['token', 'login_user'], - ['token', 'login_password'], - ['token', 'login_tenant_name']] - ) - - if not keystoneclient_found: - module.fail_json(msg="the python-keystoneclient module is required") - - user = module.params['user'] - password = module.params['password'] - tenant = module.params['tenant'] - tenant_description = module.params['tenant_description'] - email = module.params['email'] - role = module.params['role'] - state = module.params['state'] - endpoint = module.params['endpoint'] - token = module.params['token'] - login_user = module.params['login_user'] - login_password = module.params['login_password'] - login_tenant_name = module.params['login_tenant_name'] - - keystone = authenticate(endpoint, token, login_user, login_password, login_tenant_name) - - check_mode = module.check_mode - - try: - d = dispatch(keystone, user, password, tenant, tenant_description, - email, role, state, endpoint, token, login_user, - login_password, check_mode) - except Exception, e: - if check_mode: - # If we have a failure in check mode - module.exit_json(changed=True, - msg="exception: %s" % e) - else: - module.fail_json(msg="exception: %s" % e) - else: - module.exit_json(**d) - - -def dispatch(keystone, user=None, password=None, tenant=None, - tenant_description=None, email=None, role=None, - state="present", endpoint=None, token=None, login_user=None, - login_password=None, check_mode=False): - """ Dispatch to the appropriate method. 
- - Returns a dict that will be passed to exit_json - - tenant user role state - ------ ---- ---- -------- - X present ensure_tenant_exists - X absent ensure_tenant_absent - X X present ensure_user_exists - X X absent ensure_user_absent - X X X present ensure_role_exists - X X X absent ensure_role_absent - - - """ - changed = False - id = None - if tenant and not user and not role and state == "present": - changed, id = ensure_tenant_exists(keystone, tenant, - tenant_description, check_mode) - elif tenant and not user and not role and state == "absent": - changed = ensure_tenant_absent(keystone, tenant, check_mode) - elif tenant and user and not role and state == "present": - changed, id = ensure_user_exists(keystone, user, password, - email, tenant, check_mode) - elif tenant and user and not role and state == "absent": - changed = ensure_user_absent(keystone, user, check_mode) - elif tenant and user and role and state == "present": - changed, id = ensure_role_exists(keystone, user, tenant, role, - check_mode) - elif tenant and user and role and state == "absent": - changed = ensure_role_absent(keystone, user, tenant, role, check_mode) - else: - # Should never reach here - raise ValueError("Code should never reach here") - - return dict(changed=changed, id=id) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -if __name__ == '__main__': - main() diff --git a/library/cloud/linode b/library/cloud/linode deleted file mode 100644 index 9fd265fde05..00000000000 --- a/library/cloud/linode +++ /dev/null @@ -1,493 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: linode -short_description: create / delete / stop / restart an instance in Linode Public Cloud -description: - - creates / deletes a Linode Public Cloud instance and optionally waits for it to be 'running'. 
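# The dispatch() truth table above maps which parameters were supplied to an
# action. For illustration, the same routing can be expressed as a lookup
# table; this is a sketch only (ROUTES and route() are hypothetical names,
# not part of the deleted module):
ROUTES = {
    (True, False, False, 'present'): 'ensure_tenant_exists',
    (True, False, False, 'absent'):  'ensure_tenant_absent',
    (True, True,  False, 'present'): 'ensure_user_exists',
    (True, True,  False, 'absent'):  'ensure_user_absent',
    (True, True,  True,  'present'): 'ensure_role_exists',
    (True, True,  True,  'absent'):  'ensure_role_absent',
}

def route(tenant, user, role, state):
    # Normalize "was this parameter supplied?" to booleans, as the
    # if/elif chain in dispatch() does.
    key = (bool(tenant), bool(user), bool(role), state)
    try:
        return ROUTES[key]
    except KeyError:
        raise ValueError("unsupported combination: %r" % (key,))

assert route('demo', 'john', None, 'present') == 'ensure_user_exists'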
-version_added: "1.3" -options: - state: - description: - - Indicate desired state of the resource - choices: ['present', 'active', 'started', 'absent', 'deleted', 'stopped', 'restarted'] - default: present - api_key: - description: - - Linode API key - default: null - name: - description: - - Name to give the instance (alphanumeric, dashes, underscore) - - To keep sanity on the Linode Web Console, name is prepended with LinodeID_ - default: null - type: string - linode_id: - description: - - Unique ID of a linode server - aliases: lid - default: null - type: integer - plan: - description: - - plan to use for the instance (Linode plan) - default: null - type: integer - payment_term: - description: - - payment term to use for the instance (payment term in months) - default: 1 - type: integer - choices: [1, 12, 24] - password: - description: - - root password to apply to a new server (auto generated if missing) - default: null - type: string - ssh_pub_key: - description: - - SSH public key applied to root user - default: null - type: string - swap: - description: - - swap size in MB - default: 512 - type: integer - distribution: - description: - - distribution to use for the instance (Linode Distribution) - default: null - type: integer - datacenter: - description: - - datacenter to create an instance in (Linode Datacenter) - default: null - type: integer - wait: - description: - - wait for the instance to be in state 'running' before returning - default: "no" - choices: [ "yes", "no" ] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 -requirements: [ "linode-python", "pycurl" ] -author: Vincent Viallet -notes: - - LINODE_API_KEY env variable can be used instead -''' - -EXAMPLES = ''' -# Create a server -- local_action: - module: linode - api_key: 'longStringFromLinodeApi' - name: linode-test1 - plan: 1 - datacenter: 2 - distribution: 99 - password: 'superSecureRootPassword' - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: yes - wait_timeout: 600 - state: present - -# Ensure a running server (create if missing) -- local_action: - module: linode - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: 12345678 - plan: 1 - datacenter: 2 - distribution: 99 - password: 'superSecureRootPassword' - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: yes - wait_timeout: 600 - state: present - -# Delete a server -- local_action: - module: linode - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: 12345678 - state: absent - -# Stop a server -- local_action: - module: linode - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: 12345678 - state: stopped - -# Reboot a server -- local_action: - module: linode - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: 12345678 - state: restarted -''' - -import sys -import time -import os - -try: - import pycurl -except ImportError: - print("failed=True msg='pycurl required for this module'") - sys.exit(1) - - -try: - from linode import api as linode_api -except ImportError: - print("failed=True msg='linode-python required for this module'") - sys.exit(1) - - -def randompass(): - ''' - Generate a long random password that comply to Linode requirements - ''' - # Linode API currently requires the following: - # It must contain at least two of these four character classes: - # lower case letters - upper case letters - numbers - punctuation - # we play it safe :) - import random - import string - # as of python 2.4, this reseeds the PRNG from urandom - 
random.seed() - lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6)) - upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6)) - number = ''.join(random.choice(string.digits) for x in range(6)) - punct = ''.join(random.choice(string.punctuation) for x in range(6)) - p = lower + upper + number + punct - return ''.join(random.sample(p, len(p))) - -def getInstanceDetails(api, server): - ''' - Return the details of an instance, populating IPs, etc. - ''' - instance = {'id': server['LINODEID'], - 'name': server['LABEL'], - 'public': [], - 'private': []} - - # Populate with ips - for ip in api.linode_ip_list(LinodeId=server['LINODEID']): - if ip['ISPUBLIC'] and 'ipv4' not in instance: - instance['ipv4'] = ip['IPADDRESS'] - instance['fqdn'] = ip['RDNS_NAME'] - if ip['ISPUBLIC']: - instance['public'].append({'ipv4': ip['IPADDRESS'], - 'fqdn': ip['RDNS_NAME'], - 'ip_id': ip['IPADDRESSID']}) - else: - instance['private'].append({'ipv4': ip['IPADDRESS'], - 'fqdn': ip['RDNS_NAME'], - 'ip_id': ip['IPADDRESSID']}) - return instance - -def linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id, - payment_term, password, ssh_pub_key, swap, wait, wait_timeout): - instances = [] - changed = False - new_server = False - servers = [] - disks = [] - configs = [] - jobs = [] - - # See if we can match an existing server details with the provided linode_id - if linode_id: - # For the moment we only consider linode_id as criteria for match - # Later we can use more (size, name, etc.) and update existing - servers = api.linode_list(LinodeId=linode_id) - # Attempt to fetch details about disks and configs only if servers are - # found with linode_id - if servers: - disks = api.linode_disk_list(LinodeId=linode_id) - configs = api.linode_config_list(LinodeId=linode_id) - - # Act on the state - if state in ('active', 'present', 'started'): - # TODO: validate all the plan / distribution / datacenter are valid - - # Multi step process/validation: - # - need linode_id (entity) - # - need disk_id for linode_id - create disk from distrib - # - need config_id for linode_id - create config (need kernel) - - # Any create step triggers a job that need to be waited for. 
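# As the comment above notes, every create step returns a JobID that has to
# be waited on. A sketch of such a wait loop; it assumes linode-python
# exposes linode_job_list() and that finished jobs report HOST_SUCCESS == 1
# (Linode API v3 job fields) -- verify against your API version before
# relying on it:
import time

def wait_for_jobs(api, linode_id, job_ids, timeout=300):
    deadline = time.time() + timeout
    pending = set(job_ids)
    while pending and time.time() < deadline:
        for job in api.linode_job_list(LinodeId=linode_id):
            if job['JOBID'] in pending and job['HOST_SUCCESS'] == 1:
                pending.discard(job['JOBID'])
        time.sleep(5)
    return not pending  # True only if every job completed in time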
- if not servers: - for arg in ('name', 'plan', 'distribution', 'datacenter'): - if not eval(arg): - module.fail_json(msg='%s is required for active state' % arg) - # Create linode entity - new_server = True - try: - res = api.linode_create(DatacenterID=datacenter, PlanID=plan, - PaymentTerm=payment_term) - linode_id = res['LinodeID'] - # Update linode Label to match name - api.linode_update(LinodeId=linode_id, Label='%s_%s' % (linode_id, name)) - # Save server - servers = api.linode_list(LinodeId=linode_id) - except Exception, e: - module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) - - if not disks: - for arg in ('name', 'linode_id', 'distribution'): - if not eval(arg): - module.fail_json(msg='%s is required for active state' % arg) - # Create disks (1 from distrib, 1 for SWAP) - new_server = True - try: - if not password: - # Password is required on creation, if not provided generate one - password = randompass() - if not swap: - swap = 512 - # Create data disk - size = servers[0]['TOTALHD'] - swap - if ssh_pub_key: - res = api.linode_disk_createfromdistribution( - LinodeId=linode_id, DistributionID=distribution, - rootPass=password, rootSSHKey=ssh_pub_key, - Label='%s data disk (lid: %s)' % (name, linode_id), Size=size) - else: - res = api.linode_disk_createfromdistribution( - LinodeId=linode_id, DistributionID=distribution, rootPass=password, - Label='%s data disk (lid: %s)' % (name, linode_id), Size=size) - jobs.append(res['JobID']) - # Create SWAP disk - res = api.linode_disk_create(LinodeId=linode_id, Type='swap', - Label='%s swap disk (lid: %s)' % (name, linode_id), - Size=swap) - jobs.append(res['JobID']) - except Exception, e: - # TODO: destroy linode ? - module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) - - if not configs: - for arg in ('name', 'linode_id', 'distribution'): - if not eval(arg): - module.fail_json(msg='%s is required for active state' % arg) - - # Check architecture - for distrib in api.avail_distributions(): - if distrib['DISTRIBUTIONID'] != distribution: - continue - arch = '32' - if distrib['IS64BIT']: - arch = '64' - break - - # Get latest kernel matching arch - for kernel in api.avail_kernels(): - if not kernel['LABEL'].startswith('Latest %s' % arch): - continue - kernel_id = kernel['KERNELID'] - break - - # Get disk list - disks_id = [] - for disk in api.linode_disk_list(LinodeId=linode_id): - if disk['TYPE'] == 'ext3': - disks_id.insert(0, str(disk['DISKID'])) - continue - disks_id.append(str(disk['DISKID'])) - # Trick to get the 9 items in the list - while len(disks_id) < 9: - disks_id.append('') - disks_list = ','.join(disks_id) - - # Create config - new_server = True - try: - api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id, - Disklist=disks_list, Label='%s config' % name) - configs = api.linode_config_list(LinodeId=linode_id) - except Exception, e: - module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) - - # Start / Ensure servers are running - for server in servers: - # Refresh server state - server = api.linode_list(LinodeId=server['LINODEID'])[0] - # Ensure existing servers are up and running, boot if necessary - if server['STATUS'] != 1: - res = api.linode_boot(LinodeId=linode_id) - jobs.append(res['JobID']) - changed = True - - # wait here until the instances are up - wait_timeout = time.time() + wait_timeout - while wait and wait_timeout > time.time(): - # refresh the server details - server = api.linode_list(LinodeId=server['LINODEID'])[0] - # status: - # -2: Boot failed - # 1: Running - if server['STATUS'] in 
(-2, 1): - break - time.sleep(5) - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = 'Timeout waiting on %s (lid: %s)' % - (server['LABEL'], server['LINODEID'])) - # Get a fresh copy of the server details - server = api.linode_list(LinodeId=server['LINODEID'])[0] - if server['STATUS'] == -2: - module.fail_json(msg = '%s (lid: %s) failed to boot' % - (server['LABEL'], server['LINODEID'])) - # From now on we know the task is a success - # Build instance report - instance = getInstanceDetails(api, server) - # depending on wait flag select the status - if wait: - instance['status'] = 'Running' - else: - instance['status'] = 'Starting' - - # Return the root password if this is a new box and no SSH key - # has been provided - if new_server and not ssh_pub_key: - instance['password'] = password - instances.append(instance) - - elif state in ('stopped'): - for arg in ('name', 'linode_id'): - if not eval(arg): - module.fail_json(msg='%s is required for active state' % arg) - - if not servers: - module.fail_json(msg = 'Server %s (lid: %s) not found' % (name, linode_id)) - - for server in servers: - instance = getInstanceDetails(api, server) - if server['STATUS'] != 2: - try: - res = api.linode_shutdown(LinodeId=linode_id) - except Exception, e: - module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) - instance['status'] = 'Stopping' - changed = True - else: - instance['status'] = 'Stopped' - instances.append(instance) - - elif state in ('restarted'): - for arg in ('name', 'linode_id'): - if not eval(arg): - module.fail_json(msg='%s is required for active state' % arg) - - if not servers: - module.fail_json(msg = 'Server %s (lid: %s) not found' % (name, linode_id)) - - for server in servers: - instance = getInstanceDetails(api, server) - try: - res = api.linode_reboot(LinodeId=server['LINODEID']) - except Exception, e: - module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) - instance['status'] = 'Restarting' - changed = True - instances.append(instance) - - elif state in ('absent', 'deleted'): - for server in servers: - instance = getInstanceDetails(api, server) - try: - api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True) - except Exception, e: - module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) - instance['status'] = 'Deleting' - changed = True - instances.append(instance) - - # Ease parsing if only 1 instance - if len(instances) == 1: - module.exit_json(changed=changed, instance=instances[0]) - module.exit_json(changed=changed, instances=instances) - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(default='present', choices=['active', 'present', 'started', - 'deleted', 'absent', 'stopped', - 'restarted']), - api_key = dict(), - name = dict(type='str'), - plan = dict(type='int'), - distribution = dict(type='int'), - datacenter = dict(type='int'), - linode_id = dict(type='int', aliases=['lid']), - payment_term = dict(type='int', default=1, choices=[1, 12, 24]), - password = dict(type='str'), - ssh_pub_key = dict(type='str'), - swap = dict(type='int', default=512), - wait = dict(type='bool', default=True), - wait_timeout = dict(default=300), - ) - ) - - state = module.params.get('state') - api_key = module.params.get('api_key') - name = module.params.get('name') - plan = module.params.get('plan') - distribution = module.params.get('distribution') - datacenter = module.params.get('datacenter') - linode_id = module.params.get('linode_id') - payment_term = module.params.get('payment_term') - password = 
module.params.get('password') - ssh_pub_key = module.params.get('ssh_pub_key') - swap = module.params.get('swap') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - # Setup the api_key - if not api_key: - try: - api_key = os.environ['LINODE_API_KEY'] - except KeyError, e: - module.fail_json(msg = 'Unable to load %s' % e.message) - - # setup the auth - try: - api = linode_api.Api(api_key) - api.test_echo() - except Exception, e: - module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) - - linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id, - payment_term, password, ssh_pub_key, swap, wait, wait_timeout) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/cloud/nova_compute b/library/cloud/nova_compute deleted file mode 100644 index 42c54753fb8..00000000000 --- a/library/cloud/nova_compute +++ /dev/null @@ -1,585 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# (c) 2013, John Dewey -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -import operator -import os - -try: - from novaclient.v1_1 import client as nova_client - from novaclient.v1_1 import floating_ips - from novaclient import exceptions - from novaclient import utils - import time -except ImportError: - print("failed=True msg='novaclient is required for this module'") - -DOCUMENTATION = ''' ---- -module: nova_compute -version_added: "1.2" -short_description: Create/Delete VMs from OpenStack -description: - - Create or Remove virtual machines from Openstack. -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - auth_url: - description: - - The keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - name: - description: - - Name that has to be given to the instance - required: true - default: None - image_id: - description: - - The id of the base image to boot. Mutually exclusive with image_name - required: true - default: None - image_name: - description: - - The name of the base image to boot. Mutually exclusive with image_id - required: true - default: None - version_added: "1.8" - image_exclude: - description: - - Text to use to filter image names, for the case, such as HP, where there are multiple image names matching the common identifying portions. image_exclude is a negative match filter - it is text that may not exist in the image name. 
Defaults to "(deprecated)" - version_added: "1.8" - flavor_id: - description: - - The id of the flavor in which the new VM has to be created. Mutually exclusive with flavor_ram - required: false - default: 1 - flavor_ram: - description: - - The minimum amount of ram in MB that the flavor in which the new VM has to be created must have. Mutually exclusive with flavor_id - required: false - default: 1 - version_added: "1.8" - flavor_include: - description: - - Text to use to filter flavor names, for the case, such as Rackspace, where there are multiple flavors that have the same ram count. flavor_include is a positive match filter - it must exist in the flavor name. - version_added: "1.8" - key_name: - description: - - The key pair name to be used when creating a VM - required: false - default: None - security_groups: - description: - - The name of the security group to which the VM should be added - required: false - default: None - nics: - description: - - A list of network id's to which the VM's interface should be attached - required: false - default: None - auto_floating_ip: - description: - - Should a floating ip be auto created and assigned - required: false - default: 'yes' - version_added: "1.8" - floating_ips: - decription: - - list of valid floating IPs that pre-exist to assign to this node - required: false - default: None - version_added: "1.8" - floating_ip_pools: - description: - - list of floating IP pools from which to choose a floating IP - required: false - default: None - version_added: "1.8" - availability_zone: - description: - - Name of the availability zone - required: false - default: None - version_added: "1.8" - meta: - description: - - A list of key value pairs that should be provided as a metadata to the new VM - required: false - default: None - wait: - description: - - If the module should wait for the VM to be created. 
- required: false - default: 'yes' - wait_for: - description: - The amount of time the module should wait for the VM to get into active state - required: false - default: 180 - config_drive: - description: - Whether to boot the server with config drive enabled - required: false - default: 'no' - version_added: "1.8" - user_data: - description: - Opaque blob of data which is made available to the instance - required: false - default: None - version_added: "1.6" -requirements: ["novaclient"] -''' - -EXAMPLES = ''' -# Creates a new VM and attaches to a network and passes metadata to the instance -- nova_compute: - state: present - login_username: admin - login_password: admin - login_tenant_name: admin - name: vm1 - image_id: 4f905f38-e52a-43d2-b6ec-754a13ffb529 - key_name: ansible_key - wait_for: 200 - flavor_id: 4 - nics: - - net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723 - meta: - hostname: test1 - group: uge_master - -# Creates a new VM in HP Cloud AE1 region availability zone az2 and automatically assigns a floating IP -- name: launch a nova instance - hosts: localhost - tasks: - - name: launch an instance - nova_compute: - state: present - login_username: username - login_password: Equality7-2521 - login_tenant_name: username-project1 - name: vm1 - auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ - region_name: region-b.geo-1 - availability_zone: az2 - image_id: 9302692b-b787-4b52-a3a6-daebb79cb498 - key_name: test - wait_for: 200 - flavor_id: 101 - security_groups: default - auto_floating_ip: yes - -# Creates a new VM in HP Cloud AE1 region availability zone az2 and assigns a pre-known floating IP -- name: launch a nova instance - hosts: localhost - tasks: - - name: launch an instance - nova_compute: - state: present - login_username: username - login_password: Equality7-2521 - login_tenant_name: username-project1 - name: vm1 - auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ - region_name: region-b.geo-1 - availability_zone: az2 - image_id: 9302692b-b787-4b52-a3a6-daebb79cb498 - key_name: test - wait_for: 200 - flavor_id: 101 - floating_ips: - - 12.34.56.79 - -# Creates a new VM with 4G of RAM on Ubuntu Trusty, ignoring deprecated images -- name: launch a nova instance - hosts: localhost - tasks: - - name: launch an instance - nova_compute: - name: vm1 - state: present - login_username: username - login_password: Equality7-2521 - login_tenant_name: username-project1 - auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ - region_name: region-b.geo-1 - image_name: Ubuntu Server 14.04 - image_exclude: deprecated - flavor_ram: 4096 - -# Creates a new VM with 4G of RAM on Ubuntu Trusty on a Rackspace Performance node in DFW -- name: launch a nova instance - hosts: localhost - tasks: - - name: launch an instance - nova_compute: - name: vm1 - state: present - login_username: username - login_password: Equality7-2521 - login_tenant_name: username-project1 - auth_url: https://identity.api.rackspacecloud.com/v2.0/ - region_name: DFW - image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM) - flavor_ram: 4096 - flavor_include: Performance -''' - - - -def _delete_server(module, nova): - name = None - server_list = None - try: - server_list = nova.servers.list(True, {'name': module.params['name']}) - if server_list: - server = [x for x in server_list if x.name == module.params['name']] - nova.servers.delete(server.pop()) - except Exception, e: - module.fail_json( msg = "Error in deleting vm: %s" % e.message) - if module.params['wait'] ==
'no': - module.exit_json(changed = True, result = "deleted") - expire = time.time() + int(module.params['wait_for']) - while time.time() < expire: - name = nova.servers.list(True, {'name': module.params['name']}) - if not name: - module.exit_json(changed = True, result = "deleted") - time.sleep(5) - module.fail_json(msg = "Timed out waiting for server to get deleted, please check manually") - - -def _add_floating_ip_from_pool(module, nova, server): - - # instantiate FloatingIPManager object - floating_ip_obj = floating_ips.FloatingIPManager(nova) - - # empty dict and list - usable_floating_ips = {} - pools = [] - - # user specified - pools = module.params['floating_ip_pools'] - - # get the list of all floating IPs. Mileage may - # vary according to Nova Compute configuration - # per cloud provider - all_floating_ips = floating_ip_obj.list() - - # iterate through all pools of IP address. Empty - # string means all and is the default value - for pool in pools: - # temporary list per pool - pool_ips = [] - # loop through all floating IPs - for f_ip in all_floating_ips: - # if not reserved and the correct pool, add - if f_ip.instance_id is None and (f_ip.pool == pool): - pool_ips.append(f_ip.ip) - # only need one - break - - # if the list is empty, add for this pool - if not pool_ips: - try: - new_ip = nova.floating_ips.create(pool) - except Exception, e: - module.fail_json(msg = "Unable to create floating ip: %s" % e.message) - pool_ips.append(new_ip.ip) - # Add to the main list - usable_floating_ips[pool] = pool_ips - - # finally, add ip(s) to instance for each pool - for pool in usable_floating_ips: - for ip in usable_floating_ips[pool]: - try: - server.add_floating_ip(ip) - # We only need to assign one ip - but there is an inherent - # race condition and some other cloud operation may have - # stolen an available floating ip - break - except Exception, e: - module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message)) - - -def _add_floating_ip_list(module, server, ips): - # add ip(s) to instance - for ip in ips: - try: - server.add_floating_ip(ip) - except Exception, e: - module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message)) - - -def _add_auto_floating_ip(module, nova, server): - - try: - new_ip = nova.floating_ips.create() - except Exception as e: - module.fail_json(msg = "Unable to create floating ip: %s" % (e.message)) - - try: - server.add_floating_ip(new_ip) - except Exception as e: - # Clean up - we auto-created this ip, and it's not attached - # to the server, so the cloud will not know what to do with it - nova.floating_ips.delete(new_ip) - module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (new_ip, server.id, e.message)) - - -def _add_floating_ip(module, nova, server): - - if module.params['floating_ip_pools']: - _add_floating_ip_from_pool(module, nova, server) - elif module.params['floating_ips']: - _add_floating_ip_list(module, server, module.params['floating_ips']) - elif module.params['auto_floating_ip']: - _add_auto_floating_ip(module, nova, server) - else: - return server - - # this may look redundant, but if there is now a - # floating IP, then it needs to be obtained from - # a recent server object if the above code path exec'd - try: - server = nova.servers.get(server.id) - except Exception, e: - module.fail_json(msg = "Error in getting info from instance: %s " % e.message) - return server - - -def _get_image_id(module, nova): - if module.params['image_name']: - for image in nova.images.list():
- if (module.params['image_name'] in image.name and ( - not module.params['image_exclude'] - or module.params['image_exclude'] not in image.name)): - return image.id - module.fail_json(msg = "Error finding image id from name(%s)" % module.params['image_name']) - return module.params['image_id'] - - -def _get_flavor_id(module, nova): - if module.params['flavor_ram']: - for flavor in sorted(nova.flavors.list(), key=operator.attrgetter('ram')): - if (flavor.ram >= module.params['flavor_ram'] and - (not module.params['flavor_include'] or module.params['flavor_include'] in flavor.name)): - return flavor.id - module.fail_json(msg = "Error finding flavor with %sMB of RAM" % module.params['flavor_ram']) - return module.params['flavor_id'] - - -def _create_server(module, nova): - image_id = _get_image_id(module, nova) - flavor_id = _get_flavor_id(module, nova) - bootargs = [module.params['name'], image_id, flavor_id] - bootkwargs = { - 'nics' : module.params['nics'], - 'meta' : module.params['meta'], - 'security_groups': module.params['security_groups'].split(','), - #userdata is unhyphenated in novaclient, but hyphenated here for consistency with the ec2 module: - 'userdata': module.params['user_data'], - 'config_drive': module.params['config_drive'], - } - - for optional_param in ('region_name', 'key_name', 'availability_zone'): - if module.params[optional_param]: - bootkwargs[optional_param] = module.params[optional_param] - try: - server = nova.servers.create(*bootargs, **bootkwargs) - server = nova.servers.get(server.id) - except Exception, e: - module.fail_json( msg = "Error in creating instance: %s " % e.message) - if module.params['wait'] == 'yes': - expire = time.time() + int(module.params['wait_for']) - while time.time() < expire: - try: - server = nova.servers.get(server.id) - except Exception, e: - module.fail_json( msg = "Error in getting info from instance: %s" % e.message) - if server.status == 'ACTIVE': - server = _add_floating_ip(module, nova, server) - - private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private') - public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public') - - # now exit with info - module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info) - - if server.status == 'ERROR': - module.fail_json(msg = "Error in creating the server, please check logs") - time.sleep(2) - - module.fail_json(msg = "Timeout waiting for the server to come up.. Please check manually") - if server.status == 'ERROR': - module.fail_json(msg = "Error in creating the server.. 
Please check manually") - private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private') - public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public') - - module.exit_json(changed = True, id = info['id'], private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info) - - -def _delete_floating_ip_list(module, nova, server, extra_ips): - for ip in extra_ips: - nova.servers.remove_floating_ip(server=server.id, address=ip) - - -def _check_floating_ips(module, nova, server): - changed = False - if module.params['floating_ip_pools'] or module.params['floating_ips'] or module.params['auto_floating_ip']: - ips = openstack_find_nova_addresses(server.addresses, 'floating') - if not ips: - # If we're configured to have a floating but we don't have one, - # let's add one - server = _add_floating_ip(module, nova, server) - changed = True - elif module.params['floating_ips']: - # we were configured to have specific ips, let's make sure we have - # those - missing_ips = [] - for ip in module.params['floating_ips']: - if ip not in ips: - missing_ips.append(ip) - if missing_ips: - server = _add_floating_ip_list(module, server, missing_ips) - changed = True - extra_ips = [] - for ip in ips: - if ip not in module.params['floating_ips']: - extra_ips.append(ip) - if extra_ips: - _delete_floating_ip_list(module, server, extra_ips) - changed = True - return (changed, server) - - -def _get_server_state(module, nova): - server = None - try: - servers = nova.servers.list(True, {'name': module.params['name']}) - if servers: - # the {'name': module.params['name']} will also return servers - # with names that partially match the server name, so we have to - # strictly filter here - servers = [x for x in servers if x.name == module.params['name']] - if servers: - server = servers[0] - except Exception, e: - module.fail_json(msg = "Error in getting the server list: %s" % e.message) - if server and module.params['state'] == 'present': - if server.status != 'ACTIVE': - module.fail_json( msg="The VM is available but not Active. 
state:" + server.status) - (ip_changed, server) = _check_floating_ips(module, nova, server) - private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private') - public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public') - module.exit_json(changed = ip_changed, id = server.id, public_ip = ''.join(public), private_ip = ''.join(private), info = server._info) - if server and module.params['state'] == 'absent': - return True - if module.params['state'] == 'absent': - module.exit_json(changed = False, result = "not present") - return True - - - -def main(): - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - name = dict(required=True), - image_id = dict(default=None), - image_name = dict(default=None), - image_exclude = dict(default='(deprecated)'), - flavor_id = dict(default=1), - flavor_ram = dict(default=None, type='int'), - flavor_include = dict(default=None), - key_name = dict(default=None), - security_groups = dict(default='default'), - nics = dict(default=None), - meta = dict(default=None), - wait = dict(default='yes', choices=['yes', 'no']), - wait_for = dict(default=180), - state = dict(default='present', choices=['absent', 'present']), - user_data = dict(default=None), - config_drive = dict(default=False, type='bool'), - auto_floating_ip = dict(default=False, type='bool'), - floating_ips = dict(default=None), - floating_ip_pools = dict(default=None), - )) - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['auto_floating_ip','floating_ips'], - ['auto_floating_ip','floating_ip_pools'], - ['floating_ips','floating_ip_pools'], - ['image_id','image_name'], - ['flavor_id','flavor_ram'], - ], - ) - - nova = nova_client.Client(module.params['login_username'], - module.params['login_password'], - module.params['login_tenant_name'], - module.params['auth_url'], - region_name=module.params['region_name'], - service_type='compute') - try: - nova.authenticate() - except exceptions.Unauthorized, e: - module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message) - except exceptions.AuthorizationFailure, e: - module.fail_json(msg = "Unable to authorize user: %s" % e.message) - - if module.params['state'] == 'present': - if not module.params['image_id'] and not module.params['image_name']: - module.fail_json( msg = "Parameter 'image_id' or `image_name` is required if state == 'present'") - else: - _get_server_state(module, nova) - _create_server(module, nova) - if module.params['state'] == 'absent': - _get_server_state(module, nova) - _delete_server(module, nova) - -# this is magic, see lib/ansible/module_common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/nova_keypair b/library/cloud/nova_keypair deleted file mode 100644 index c7c9affb3e6..00000000000 --- a/library/cloud/nova_keypair +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# (c) 2013, John Dewey -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -try: - from novaclient.v1_1 import client as nova_client - from novaclient import exceptions as exc - import time -except ImportError: - print("failed=True msg='novaclient is required for this module to work'") - -DOCUMENTATION = ''' ---- -module: nova_keypair -version_added: "1.2" -short_description: Add/Delete key pair from nova -description: - - Add or Remove key pair from nova . -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - auth_url: - description: - - The keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - name: - description: - - Name that has to be given to the key pair - required: true - default: None - public_key: - description: - - The public key that would be uploaded to nova and injected to vm's upon creation - required: false - default: None - -requirements: ["novaclient"] -''' -EXAMPLES = ''' -# Creates a key pair with the running users public key -- nova_keypair: state=present login_username=admin - login_password=admin login_tenant_name=admin name=ansible_key - public_key={{ lookup('file','~/.ssh/id_rsa.pub') }} - -# Creates a new key pair and the private key returned after the run. -- nova_keypair: state=present login_username=admin login_password=admin - login_tenant_name=admin name=ansible_key -''' - -def main(): - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - name = dict(required=True), - public_key = dict(default=None), - state = dict(default='present', choices=['absent', 'present']) - )) - module = AnsibleModule(argument_spec=argument_spec) - - nova = nova_client.Client(module.params['login_username'], - module.params['login_password'], - module.params['login_tenant_name'], - module.params['auth_url'], - region_name=module.params['region_name'], - service_type='compute') - try: - nova.authenticate() - except exc.Unauthorized, e: - module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message) - except exc.AuthorizationFailure, e: - module.fail_json(msg = "Unable to authorize user: %s" % e.message) - - if module.params['state'] == 'present': - for key in nova.keypairs.list(): - if key.name == module.params['name']: - if module.params['public_key'] and (module.params['public_key'] != key.public_key ): - module.fail_json(msg = "name {} present but key hash not the same as offered. 
Delete key first.".format(key.name)) - else: - module.exit_json(changed = False, result = "Key present") - try: - key = nova.keypairs.create(module.params['name'], module.params['public_key']) - except Exception, e: - module.fail_json(msg = "Error in creating the keypair: %s" % e.message) - if not module.params['public_key']: - module.exit_json(changed = True, key = key.private_key) - module.exit_json(changed = True, key = None) - if module.params['state'] == 'absent': - for key in nova.keypairs.list(): - if key.name == module.params['name']: - try: - nova.keypairs.delete(module.params['name']) - except Exception, e: - module.fail_json(msg = "The keypair deletion has failed: %s" % e.message) - module.exit_json( changed = True, result = "deleted") - module.exit_json(changed = False, result = "not present") - -# this is magic, see lib/ansible/module_common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/ovirt b/library/cloud/ovirt deleted file mode 100755 index fb84e918001..00000000000 --- a/library/cloud/ovirt +++ /dev/null @@ -1,425 +0,0 @@ -#!/usr/bin/python - -# (c) 2013, Vincent Van der Kussen -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' ---- -module: ovirt -author: Vincent Van der Kussen -short_description: oVirt/RHEV platform management -description: - - allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform -version_added: "1.4" -options: - user: - description: - - the user to authenticate with - default: null - required: true - aliases: [] - url: - description: - - the url of the oVirt instance - default: null - required: true - aliases: [] - instance_name: - description: - - the name of the instance to use - default: null - required: true - aliases: [ vmname ] - password: - description: - - password of the user to authenticate with - default: null - required: true - aliases: [] - image: - description: - - template to use for the instance - default: null - required: false - aliases: [] - resource_type: - description: - - whether you want to deploy an image or create an instance from scratch.
- default: null - required: false - aliases: [] - choices: [ 'new', 'template' ] - zone: - description: - - deploy the image to this oVirt cluster - default: null - required: false - aliases: [] - instance_disksize: - description: - - size of the instance's disk in GB - default: null - required: false - aliases: [ vm_disksize] - instance_cpus: - description: - - the instance's number of cpu's - default: 1 - required: false - aliases: [ vmcpus ] - instance_nic: - description: - - name of the network interface in oVirt/RHEV - default: null - required: false - aliases: [ vmnic ] - instance_network: - description: - - the logical network the machine should belong to - default: rhevm - required: false - aliases: [ vmnetwork ] - instance_mem: - description: - - the instance's amount of memory in MB - default: null - required: false - aliases: [ vmmem ] - instance_type: - description: - - define if the instance is a server or desktop - default: server - required: false - aliases: [ vmtype ] - choices: [ 'server', 'desktop' ] - disk_alloc: - description: - - define if disk is thin or preallocated - default: thin - required: false - aliases: [] - choices: [ 'thin', 'preallocated' ] - disk_int: - description: - - interface type of the disk - default: virtio - required: false - aliases: [] - choices: [ 'virtio', 'ide' ] - instance_os: - description: - - type of Operating System - default: null - required: false - aliases: [ vmos ] - instance_cores: - description: - - define the instance's number of cores - default: 1 - required: false - aliases: [ vmcores ] - sdomain: - description: - - the Storage Domain where you want to create the instance's disk on. - default: null - required: false - aliases: [] - region: - description: - - the oVirt/RHEV datacenter where you want to deploy to - default: null - required: false - aliases: [] - state: - description: - - create, terminate or remove instances - default: 'present' - required: false - aliases: [] - choices: ['present', 'absent', 'shutdown', 'started', 'restarted'] - -requirements: [ "ovirt-engine-sdk" ] -''' -EXAMPLES = ''' -# Basic example provisioning from image. 
- -action: ovirt > - user=admin@internal - url=https://ovirt.example.com - instance_name=ansiblevm04 - password=secret - image=centos_64 - zone=cluster01 - resource_type=template" - -# Full example to create new instance from scratch -action: ovirt > - instance_name=testansible - resource_type=new - instance_type=server - user=admin@internal - password=secret - url=https://ovirt.example.com - instance_disksize=10 - zone=cluster01 - region=datacenter1 - instance_cpus=1 - instance_nic=nic1 - instance_network=rhevm - instance_mem=1000 - disk_alloc=thin - sdomain=FIBER01 - instance_cores=1 - instance_os=rhel_6x64 - disk_int=virtio" - -# stopping an instance -action: ovirt > - instance_name=testansible - state=stopped - user=admin@internal - password=secret - url=https://ovirt.example.com - -# starting an instance -action: ovirt > - instance_name=testansible - state=started - user=admin@internal - password=secret - url=https://ovirt.example.com - - -''' -try: - from ovirtsdk.api import API - from ovirtsdk.xml import params -except ImportError: - print "failed=True msg='ovirtsdk required for this module'" - sys.exit(1) - -# ------------------------------------------------------------------- # -# create connection with API -# -def conn(url, user, password): - api = API(url=url, username=user, password=password, insecure=True) - try: - value = api.test() - except: - print "error connecting to the oVirt API" - sys.exit(1) - return api - -# ------------------------------------------------------------------- # -# Create VM from scratch -def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int): - if vmdisk_alloc == 'thin': - # define VM params - vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype) - # define disk params - vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow', - storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) - # define network parameters - network_net = params.Network(name=vmnetwork) - nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio') - elif vmdisk_alloc == 'preallocated': - # define VM params - vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype) - # define disk params - vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw', - storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) - # define network parameters - network_net = params.Network(name=vmnetwork) - nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio') - - try: - conn.vms.add(vmparams) - except: - print "Error creating VM with specified parameters" - sys.exit(1) - vm = conn.vms.get(name=vmname) - try: - vm.disks.add(vmdisk) - except: - print "Error attaching disk" - try: - vm.nics.add(nic_net1) - except: - print "Error adding nic" - - -# create an instance from a template -def create_vm_template(conn, vmname, 
-    vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone),
-                         template=conn.templates.get(name=image), disks=params.Disks(clone=True))
-    try:
-        conn.vms.add(vmparams)
-    except:
-        print 'error adding template %s' % image
-        sys.exit(1)
-
-
-# start instance
-def vm_start(conn, vmname):
-    vm = conn.vms.get(name=vmname)
-    vm.start()
-
-# Stop instance
-def vm_stop(conn, vmname):
-    vm = conn.vms.get(name=vmname)
-    vm.stop()
-
-# restart instance
-def vm_restart(conn, vmname):
-    vm = conn.vms.get(name=vmname)
-    vm.stop()
-    while conn.vms.get(name=vmname).get_status().get_state() != 'down':
-        time.sleep(5)
-    vm.start()
-
-# remove an instance
-def vm_remove(conn, vmname):
-    vm = conn.vms.get(name=vmname)
-    vm.delete()
-
-# ------------------------------------------------------------------- #
-# VM statuses
-#
-# Get the VM's status
-def vm_status(conn, vmname):
-    status = conn.vms.get(name=vmname).status.state
-    return status
-
-
-# Get the VM object and return its name if the object exists
-def get_vm(conn, vmname):
-    vm = conn.vms.get(name=vmname)
-    if vm is None:
-        name = "empty"
-    else:
-        name = vm.get_name()
-    return name
-
-# ------------------------------------------------------------------- #
-# Hypervisor operations
-#
-# not available yet
-# ------------------------------------------------------------------- #
-# Main
-
-def main():
-
-    module = AnsibleModule(
-        argument_spec = dict(
-            state = dict(default='present', choices=['present', 'absent', 'shutdown', 'started', 'restart']),
-            #name = dict(required=True),
-            user = dict(required=True),
-            url = dict(required=True),
-            instance_name = dict(required=True, aliases=['vmname']),
-            password = dict(required=True),
-            image = dict(),
-            resource_type = dict(choices=['new', 'template']),
-            zone = dict(),
-            instance_disksize = dict(aliases=['vm_disksize']),
-            instance_cpus = dict(default=1, aliases=['vmcpus']),
-            instance_nic = dict(aliases=['vmnic']),
-            instance_network = dict(default='rhevm', aliases=['vmnetwork']),
-            instance_mem = dict(aliases=['vmmem']),
-            instance_type = dict(default='server', aliases=['vmtype'], choices=['server', 'desktop']),
-            disk_alloc = dict(default='thin', choices=['thin', 'preallocated']),
-            disk_int = dict(default='virtio', choices=['virtio', 'ide']),
-            instance_os = dict(aliases=['vmos']),
-            instance_cores = dict(default=1, aliases=['vmcores']),
-            sdomain = dict(),
-            region = dict(),
-        )
-    )
-
-    state = module.params['state']
-    user = module.params['user']
-    url = module.params['url']
-    vmname = module.params['instance_name']
-    password = module.params['password']
-    image = module.params['image']                    # name of the image to deploy
-    resource_type = module.params['resource_type']    # template or from scratch
-    zone = module.params['zone']                      # oVirt cluster
-    vmdisk_size = module.params['instance_disksize']  # disk size
-    vmcpus = module.params['instance_cpus']           # number of cpus
-    vmnic = module.params['instance_nic']             # network interface
-    vmnetwork = module.params['instance_network']     # logical network
-    vmmem = module.params['instance_mem']             # memory size
-    vmdisk_alloc = module.params['disk_alloc']        # thin or preallocated
-    vmdisk_int = module.params['disk_int']            # disk interface: virtio or ide
-    vmos = module.params['instance_os']               # operating system
-    vmtype = module.params['instance_type']           # server or desktop
-    vmcores = module.params['instance_cores']         # number of cores
-    sdomain = module.params['sdomain']                # storage domain to store the disk on
-    region = module.params['region']                  # oVirt datacenter
-
-    # initialize connection
-    c = conn(url+"/api", user, password)
-
-    if state == 'present':
-        if get_vm(c, vmname) == "empty":
-            if resource_type == 'template':
-                create_vm_template(c, vmname, image, zone)
-                module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname, image))
-            elif resource_type == 'new':
-                # FIXME: refactor, use keyword args.
-                create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
-                module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
-            else:
-                module.fail_json(msg="You did not specify a resource type")
-        else:
-            module.exit_json(changed=False, msg="VM %s already exists" % vmname)
-
-    if state == 'started':
-        if vm_status(c, vmname) == 'up':
-            module.exit_json(changed=False, msg="VM %s is already running" % vmname)
-        else:
-            vm_start(c, vmname)
-            module.exit_json(changed=True, msg="VM %s started" % vmname)
-
-    if state == 'shutdown':
-        if vm_status(c, vmname) == 'down':
-            module.exit_json(changed=False, msg="VM %s is already shut down" % vmname)
-        else:
-            vm_stop(c, vmname)
-            module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)
-
-    if state == 'restart':
-        if vm_status(c, vmname) == 'up':
-            vm_restart(c, vmname)
-            module.exit_json(changed=True, msg="VM %s restarted" % vmname)
-        else:
-            module.exit_json(changed=False, msg="VM %s is not running" % vmname)
-
-    if state == 'absent':
-        if get_vm(c, vmname) == "empty":
-            module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
-        else:
-            vm_remove(c, vmname)
-            module.exit_json(changed=True, msg="VM %s removed" % vmname)
-
-
-# import module snippets
-from ansible.module_utils.basic import *
-main()
diff --git a/library/cloud/quantum_floating_ip b/library/cloud/quantum_floating_ip
deleted file mode 100644
index 17f78effffd..00000000000
--- a/library/cloud/quantum_floating_ip
+++ /dev/null
@@ -1,266 +0,0 @@
-#!/usr/bin/python
-#coding: utf-8 -*-
-
-# (c) 2013, Benno Joy
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
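Every quantum_* module removed below repeats the same three-step bootstrap: authenticate against keystone, look up the network endpoint in the service catalog, then build a neutron (or quantum) client from the token. A condensed sketch of that shared pattern follows, assuming the keystoneclient v2.0 and neutronclient v2.0 APIs these modules import; the function name get_neutron is illustrative, not part of the diff.

    # Condensed sketch of the keystone -> neutron bootstrap repeated in each
    # quantum_* module below (illustrative, not part of the removed files).
    from keystoneclient.v2_0 import client as ksclient
    from neutronclient.neutron import client as neutron_client

    def get_neutron(params):
        # authenticate against keystone using the login_* module parameters
        keystone = ksclient.Client(username=params['login_username'],
                                   password=params['login_password'],
                                   tenant_name=params['login_tenant_name'],
                                   auth_url=params['auth_url'])
        # resolve the network endpoint, then reuse the issued token
        endpoint = keystone.service_catalog.url_for(service_type='network',
                                                    endpoint_type='publicURL')
        return neutron_client.Client('2.0', token=keystone.auth_token,
                                     endpoint_url=endpoint)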
- -try: - from novaclient.v1_1 import client as nova_client - try: - from neutronclient.neutron import client - except ImportError: - from quantumclient.quantum import client - from keystoneclient.v2_0 import client as ksclient - import time -except ImportError: - print("failed=True msg='novaclient,keystoneclient and quantumclient (or neutronclient) are required'") - -DOCUMENTATION = ''' ---- -module: quantum_floating_ip -version_added: "1.2" -short_description: Add/Remove floating IP from an instance -description: - - Add or Remove a floating IP to an instance -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - auth_url: - description: - - The keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - network_name: - description: - - Name of the network from which IP has to be assigned to VM. Please make sure the network is an external network - required: true - default: None - instance_name: - description: - - The name of the instance to which the IP address should be assigned - required: true - default: None - internal_network_name: - description: - - The name of the network of the port to associate with the floating ip. Necessary when VM multiple networks. - required: false - default: None - version_added: "1.5" -requirements: ["novaclient", "quantumclient", "neutronclient", "keystoneclient"] -''' - -EXAMPLES = ''' -# Assign a floating ip to the instance from an external network -- quantum_floating_ip: state=present login_username=admin login_password=admin - login_tenant_name=admin network_name=external_network - instance_name=vm1 internal_network_name=internal_network -''' - -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) - global _os_keystone - _os_keystone = kclient - return kclient - - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s" % e.message) - return endpoint - -def _get_neutron_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient) - kwargs = { - 'token': token, - 'endpoint_url': endpoint - } - try: - neutron = client.Client('2.0', **kwargs) - except Exception, e: - module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) - return neutron - -def _get_server_state(module, nova): - server_info = None - server = None - try: - for server in nova.servers.list(): - if server: - info = server._info - if info['name'] == module.params['instance_name']: - if info['status'] != 'ACTIVE' and module.params['state'] == 'present': - module.fail_json( msg="The VM is available but not Active. 
state:" + info['status']) - server_info = info - break - except Exception, e: - module.fail_json(msg = "Error in getting the server list: %s" % e.message) - return server_info, server - -def _get_port_info(neutron, module, instance_id, internal_network_name=None): - subnet_id = None - if internal_network_name: - kwargs = {'name': internal_network_name} - networks = neutron.list_networks(**kwargs) - network_id = networks['networks'][0]['id'] - kwargs = { - 'network_id': network_id, - 'ip_version': 4 - } - subnets = neutron.list_subnets(**kwargs) - subnet_id = subnets['subnets'][0]['id'] - kwargs = { - 'device_id': instance_id, - } - try: - ports = neutron.list_ports(**kwargs) - except Exception, e: - module.fail_json( msg = "Error in listing ports: %s" % e.message) - if subnet_id: - port = next(port for port in ports['ports'] if port['fixed_ips'][0]['subnet_id'] == subnet_id) - port_id = port['id'] - fixed_ip_address = port['fixed_ips'][0]['ip_address'] - else: - port_id = ports['ports'][0]['id'] - fixed_ip_address = ports['ports'][0]['fixed_ips'][0]['ip_address'] - if not ports['ports']: - return None, None - return fixed_ip_address, port_id - -def _get_floating_ip(module, neutron, fixed_ip_address): - kwargs = { - 'fixed_ip_address': fixed_ip_address - } - try: - ips = neutron.list_floatingips(**kwargs) - except Exception, e: - module.fail_json(msg = "error in fetching the floatingips's %s" % e.message) - if not ips['floatingips']: - return None, None - return ips['floatingips'][0]['id'], ips['floatingips'][0]['floating_ip_address'] - -def _create_floating_ip(neutron, module, port_id, net_id, fixed_ip): - kwargs = { - 'port_id': port_id, - 'floating_network_id': net_id, - 'fixed_ip_address': fixed_ip - } - try: - result = neutron.create_floatingip({'floatingip': kwargs}) - except Exception, e: - module.fail_json(msg="There was an error in updating the floating ip address: %s" % e.message) - module.exit_json(changed=True, result=result, public_ip=result['floatingip']['floating_ip_address']) - -def _get_net_id(neutron, module): - kwargs = { - 'name': module.params['network_name'], - } - try: - networks = neutron.list_networks(**kwargs) - except Exception, e: - module.fail_json("Error in listing neutron networks: %s" % e.message) - if not networks['networks']: - return None - return networks['networks'][0]['id'] - -def _update_floating_ip(neutron, module, port_id, floating_ip_id): - kwargs = { - 'port_id': port_id - } - try: - result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs}) - except Exception, e: - module.fail_json(msg="There was an error in updating the floating ip address: %s" % e.message) - module.exit_json(changed=True, result=result) - - -def main(): - - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - network_name = dict(required=True), - instance_name = dict(required=True), - state = dict(default='present', choices=['absent', 'present']), - internal_network_name = dict(default=None), - )) - module = AnsibleModule(argument_spec=argument_spec) - - try: - nova = nova_client.Client(module.params['login_username'], module.params['login_password'], - module.params['login_tenant_name'], module.params['auth_url'], service_type='compute') - neutron = _get_neutron_client(module, module.params) - except Exception, e: - module.fail_json(msg="Error in authenticating to nova: %s" % e.message) - - server_info, server_obj = _get_server_state(module, nova) - if not server_info: - module.fail_json(msg="The instance name provided cannot be found") - - 
fixed_ip, port_id = _get_port_info(neutron, module, server_info['id'], module.params['internal_network_name']) - if not port_id: - module.fail_json(msg="Cannot find a port for this instance, maybe fixed ip is not assigned") - - floating_id, floating_ip = _get_floating_ip(module, neutron, fixed_ip) - - if module.params['state'] == 'present': - if floating_ip: - module.exit_json(changed = False, public_ip=floating_ip) - net_id = _get_net_id(neutron, module) - if not net_id: - module.fail_json(msg = "cannot find the network specified, please check") - _create_floating_ip(neutron, module, port_id, net_id, fixed_ip) - - if module.params['state'] == 'absent': - if floating_ip: - _update_floating_ip(neutron, module, None, floating_id) - module.exit_json(changed=False) - -# this is magic, see lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/quantum_floating_ip_associate b/library/cloud/quantum_floating_ip_associate deleted file mode 100644 index 91df2690b62..00000000000 --- a/library/cloud/quantum_floating_ip_associate +++ /dev/null @@ -1,218 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
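Both floating-IP modules in this diff reduce to one neutron call: a floating IP is associated by pointing it at a port, and disassociated by clearing that port. A minimal sketch of that flow, mirroring the modules' _update_floating_ip helpers (the function name associate is illustrative):

    # Sketch of the association flow both floating-ip modules build on.
    def associate(neutron, floating_ip_id, port_id):
        # port_id=None disassociates; a real port id associates
        return neutron.update_floatingip(floating_ip_id,
                                         {'floatingip': {'port_id': port_id}})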
- -try: - from novaclient.v1_1 import client as nova_client - try: - from neutronclient.neutron import client - except ImportError: - from quantumclient.quantum import client - from keystoneclient.v2_0 import client as ksclient - import time -except ImportError: - print "failed=True msg='novaclient, keystone, and quantumclient (or neutronclient) client are required'" - -DOCUMENTATION = ''' ---- -module: quantum_floating_ip_associate -version_added: "1.2" -short_description: Associate or disassociate a particular floating IP with an instance -description: - - Associates or disassociates a specific floating IP with a particular instance -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - the tenant name of the login user - required: true - default: true - auth_url: - description: - - the keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - name of the region - required: false - default: None - state: - description: - - indicates the desired state of the resource - choices: ['present', 'absent'] - default: present - instance_name: - description: - - name of the instance to which the public IP should be assigned - required: true - default: None - ip_address: - description: - - floating ip that should be assigned to the instance - required: true - default: None -requirements: ["quantumclient", "neutronclient", "keystoneclient"] -''' - -EXAMPLES = ''' -# Associate a specific floating IP with an Instance -- quantum_floating_ip_associate: - state=present - login_username=admin - login_password=admin - login_tenant_name=admin - ip_address=1.1.1.1 - instance_name=vm1 -''' - -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) - global _os_keystone - _os_keystone = kclient - return kclient - - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s" % e.message) - return endpoint - -def _get_neutron_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient) - kwargs = { - 'token': token, - 'endpoint_url': endpoint - } - try: - neutron = client.Client('2.0', **kwargs) - except Exception, e: - module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) - return neutron - -def _get_server_state(module, nova): - server_info = None - server = None - try: - for server in nova.servers.list(): - if server: - info = server._info - if info['name'] == module.params['instance_name']: - if info['status'] != 'ACTIVE' and module.params['state'] == 'present': - module.fail_json(msg="The VM is available but not Active. 
state:" + info['status']) - server_info = info - break - except Exception, e: - module.fail_json(msg = "Error in getting the server list: %s" % e.message) - return server_info, server - -def _get_port_id(neutron, module, instance_id): - kwargs = dict(device_id = instance_id) - try: - ports = neutron.list_ports(**kwargs) - except Exception, e: - module.fail_json( msg = "Error in listing ports: %s" % e.message) - if not ports['ports']: - return None - return ports['ports'][0]['id'] - -def _get_floating_ip_id(module, neutron): - kwargs = { - 'floating_ip_address': module.params['ip_address'] - } - try: - ips = neutron.list_floatingips(**kwargs) - except Exception, e: - module.fail_json(msg = "error in fetching the floatingips's %s" % e.message) - if not ips['floatingips']: - module.fail_json(msg = "Could find the ip specified in parameter, Please check") - ip = ips['floatingips'][0]['id'] - if not ips['floatingips'][0]['port_id']: - state = "detached" - else: - state = "attached" - return state, ip - -def _update_floating_ip(neutron, module, port_id, floating_ip_id): - kwargs = { - 'port_id': port_id - } - try: - result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs}) - except Exception, e: - module.fail_json(msg = "There was an error in updating the floating ip address: %s" % e.message) - module.exit_json(changed = True, result = result, public_ip=module.params['ip_address']) - -def main(): - - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - ip_address = dict(required=True), - instance_name = dict(required=True), - state = dict(default='present', choices=['absent', 'present']) - )) - module = AnsibleModule(argument_spec=argument_spec) - - try: - nova = nova_client.Client(module.params['login_username'], module.params['login_password'], - module.params['login_tenant_name'], module.params['auth_url'], service_type='compute') - except Exception, e: - module.fail_json( msg = " Error in authenticating to nova: %s" % e.message) - neutron = _get_neutron_client(module, module.params) - state, floating_ip_id = _get_floating_ip_id(module, neutron) - if module.params['state'] == 'present': - if state == 'attached': - module.exit_json(changed = False, result = 'attached', public_ip=module.params['ip_address']) - server_info, server_obj = _get_server_state(module, nova) - if not server_info: - module.fail_json(msg = " The instance name provided cannot be found") - port_id = _get_port_id(neutron, module, server_info['id']) - if not port_id: - module.fail_json(msg = "Cannot find a port for this instance, maybe fixed ip is not assigned") - _update_floating_ip(neutron, module, port_id, floating_ip_id) - - if module.params['state'] == 'absent': - if state == 'detached': - module.exit_json(changed = False, result = 'detached') - if state == 'attached': - _update_floating_ip(neutron, module, None, floating_ip_id) - module.exit_json(changed = True, result = "detached") - -# this is magic, see lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/quantum_network b/library/cloud/quantum_network deleted file mode 100644 index 6b0c66e7a12..00000000000 --- a/library/cloud/quantum_network +++ /dev/null @@ -1,279 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either 
version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -try: - try: - from neutronclient.neutron import client - except ImportError: - from quantumclient.quantum import client - from keystoneclient.v2_0 import client as ksclient -except ImportError: - print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'") - -DOCUMENTATION = ''' ---- -module: quantum_network -version_added: "1.4" -short_description: Creates/Removes networks from OpenStack -description: - - Add or Remove network from OpenStack. -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - tenant_name: - description: - - The name of the tenant for whom the network is created - required: false - default: None - auth_url: - description: - - The keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - name: - description: - - Name to be assigned to the nework - required: true - default: None - provider_network_type: - description: - - The type of the network to be created, gre, vlan, local. Available types depend on the plugin. The Quantum service decides if not specified. - required: false - default: None - provider_physical_network: - description: - - The physical network which would realize the virtual network for flat and vlan networks. - required: false - default: None - provider_segmentation_id: - description: - - The id that has to be assigned to the network, in case of vlan networks that would be vlan id and for gre the tunnel id - required: false - default: None - router_external: - description: - - If 'yes', specifies that the virtual network is a external network (public). 
- required: false - default: false - shared: - description: - - Whether this network is shared or not - required: false - default: false - admin_state_up: - description: - - Whether the state should be marked as up or down - required: false - default: true -requirements: ["quantumclient", "neutronclient", "keystoneclient"] - -''' - -EXAMPLES = ''' -# Create a GRE backed Quantum network with tunnel id 1 for tenant1 -- quantum_network: name=t1network tenant_name=tenant1 state=present - provider_network_type=gre provider_segmentation_id=1 - login_username=admin login_password=admin login_tenant_name=admin - -# Create an external network -- quantum_network: name=external_network state=present - provider_network_type=local router_external=yes - login_username=admin login_password=admin login_tenant_name=admin -''' - -_os_keystone = None -_os_tenant_id = None - -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message) - global _os_keystone - _os_keystone = kclient - return kclient - - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s " %e.message) - return endpoint - -def _get_neutron_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient) - kwargs = { - 'token': token, - 'endpoint_url': endpoint - } - try: - neutron = client.Client('2.0', **kwargs) - except Exception, e: - module.fail_json(msg = " Error in connecting to neutron: %s " %e.message) - return neutron - -def _set_tenant_id(module): - global _os_tenant_id - if not module.params['tenant_name']: - tenant_name = module.params['login_tenant_name'] - else: - tenant_name = module.params['tenant_name'] - - for tenant in _os_keystone.tenants.list(): - if tenant.name == tenant_name: - _os_tenant_id = tenant.id - break - if not _os_tenant_id: - module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - - -def _get_net_id(neutron, module): - kwargs = { - 'tenant_id': _os_tenant_id, - 'name': module.params['name'], - } - try: - networks = neutron.list_networks(**kwargs) - except Exception, e: - module.fail_json(msg = "Error in listing neutron networks: %s" % e.message) - if not networks['networks']: - return None - return networks['networks'][0]['id'] - -def _create_network(module, neutron): - - neutron.format = 'json' - - network = { - 'name': module.params.get('name'), - 'tenant_id': _os_tenant_id, - 'provider:network_type': module.params.get('provider_network_type'), - 'provider:physical_network': module.params.get('provider_physical_network'), - 'provider:segmentation_id': module.params.get('provider_segmentation_id'), - 'router:external': module.params.get('router_external'), - 'shared': module.params.get('shared'), - 'admin_state_up': module.params.get('admin_state_up'), - } - - if module.params['provider_network_type'] == 'local': - network.pop('provider:physical_network', None) - network.pop('provider:segmentation_id', None) - - if module.params['provider_network_type'] == 'flat': - network.pop('provider:segmentation_id', None) - - if module.params['provider_network_type'] == 
'gre': - network.pop('provider:physical_network', None) - - if module.params['provider_network_type'] is None: - network.pop('provider:network_type', None) - network.pop('provider:physical_network', None) - network.pop('provider:segmentation_id', None) - - try: - net = neutron.create_network({'network':network}) - except Exception, e: - module.fail_json(msg = "Error in creating network: %s" % e.message) - return net['network']['id'] - -def _delete_network(module, net_id, neutron): - - try: - id = neutron.delete_network(net_id) - except Exception, e: - module.fail_json(msg = "Error in deleting the network: %s" % e.message) - return True - -def main(): - - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - name = dict(required=True), - tenant_name = dict(default=None), - provider_network_type = dict(default=None, choices=['local', 'vlan', 'flat', 'gre']), - provider_physical_network = dict(default=None), - provider_segmentation_id = dict(default=None), - router_external = dict(default=False, type='bool'), - shared = dict(default=False, type='bool'), - admin_state_up = dict(default=True, type='bool'), - state = dict(default='present', choices=['absent', 'present']) - )) - module = AnsibleModule(argument_spec=argument_spec) - - if module.params['provider_network_type'] in ['vlan' , 'flat']: - if not module.params['provider_physical_network']: - module.fail_json(msg = " for vlan and flat networks, variable provider_physical_network should be set.") - - if module.params['provider_network_type'] in ['vlan', 'gre']: - if not module.params['provider_segmentation_id']: - module.fail_json(msg = " for vlan & gre networks, variable provider_segmentation_id should be set.") - - neutron = _get_neutron_client(module, module.params) - - _set_tenant_id(module) - - if module.params['state'] == 'present': - network_id = _get_net_id(neutron, module) - if not network_id: - network_id = _create_network(module, neutron) - module.exit_json(changed = True, result = "Created", id = network_id) - else: - module.exit_json(changed = False, result = "Success", id = network_id) - - if module.params['state'] == 'absent': - network_id = _get_net_id(neutron, module) - if not network_id: - module.exit_json(changed = False, result = "Success") - else: - _delete_network(module, network_id, neutron) - module.exit_json(changed = True, result = "Deleted") - -# this is magic, see lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/quantum_router b/library/cloud/quantum_router deleted file mode 100644 index 38d479128f2..00000000000 --- a/library/cloud/quantum_router +++ /dev/null @@ -1,210 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
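The network-creation logic above hinges on pruning provider:* attributes that do not apply to the requested network type before calling create_network. A condensed sketch of that pruning, equivalent to the per-type pops in _create_network (build_network_body is an illustrative name):

    # Sketch of how quantum_network prunes provider:* attributes before
    # create_network, based on the requested provider_network_type.
    def build_network_body(params, tenant_id):
        net_type = params.get('provider_network_type')
        network = {
            'name': params['name'],
            'tenant_id': tenant_id,
            'provider:network_type': net_type,
            'provider:physical_network': params.get('provider_physical_network'),
            'provider:segmentation_id': params.get('provider_segmentation_id'),
        }
        # local/gre networks carry no physical network; local/flat carry no
        # segmentation id; with no type at all, send none of the provider keys
        if net_type in (None, 'local', 'gre'):
            network.pop('provider:physical_network', None)
        if net_type in (None, 'local', 'flat'):
            network.pop('provider:segmentation_id', None)
        if net_type is None:
            network.pop('provider:network_type', None)
        return network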
- -try: - try: - from neutronclient.neutron import client - except ImportError: - from quantumclient.quantum import client - from keystoneclient.v2_0 import client as ksclient -except ImportError: - print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'") - -DOCUMENTATION = ''' ---- -module: quantum_router -version_added: "1.2" -short_description: Create or Remove router from openstack -description: - - Create or Delete routers from OpenStack -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - auth_url: - description: - - The keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - name: - description: - - Name to be give to the router - required: true - default: None - tenant_name: - description: - - Name of the tenant for which the router has to be created, if none router would be created for the login tenant. - required: false - default: None - admin_state_up: - description: - - desired admin state of the created router . - required: false - default: true -requirements: ["quantumclient", "neutronclient", "keystoneclient"] -''' - -EXAMPLES = ''' -# Creates a router for tenant admin -- quantum_router: state=present - login_username=admin - login_password=admin - login_tenant_name=admin - name=router1" -''' - -_os_keystone = None -_os_tenant_id = None - -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) - global _os_keystone - _os_keystone = kclient - return kclient - - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s" % e.message) - return endpoint - -def _get_neutron_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient) - kwargs = { - 'token': token, - 'endpoint_url': endpoint - } - try: - neutron = client.Client('2.0', **kwargs) - except Exception, e: - module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) - return neutron - -def _set_tenant_id(module): - global _os_tenant_id - if not module.params['tenant_name']: - login_tenant_name = module.params['login_tenant_name'] - else: - login_tenant_name = module.params['tenant_name'] - - for tenant in _os_keystone.tenants.list(): - if tenant.name == login_tenant_name: - _os_tenant_id = tenant.id - break - if not _os_tenant_id: - module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - - -def _get_router_id(module, neutron): - kwargs = { - 'name': module.params['name'], - 'tenant_id': _os_tenant_id, - } - try: - routers = neutron.list_routers(**kwargs) - except Exception, e: - module.fail_json(msg = "Error 
in getting the router list: %s " % e.message) - if not routers['routers']: - return None - return routers['routers'][0]['id'] - -def _create_router(module, neutron): - router = { - 'name': module.params['name'], - 'tenant_id': _os_tenant_id, - 'admin_state_up': module.params['admin_state_up'], - } - try: - new_router = neutron.create_router(dict(router=router)) - except Exception, e: - module.fail_json( msg = "Error in creating router: %s" % e.message) - return new_router['router']['id'] - -def _delete_router(module, neutron, router_id): - try: - neutron.delete_router(router_id) - except: - module.fail_json("Error in deleting the router") - return True - -def main(): - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - name = dict(required=True), - tenant_name = dict(default=None), - state = dict(default='present', choices=['absent', 'present']), - admin_state_up = dict(type='bool', default=True), - )) - module = AnsibleModule(argument_spec=argument_spec) - - neutron = _get_neutron_client(module, module.params) - _set_tenant_id(module) - - if module.params['state'] == 'present': - router_id = _get_router_id(module, neutron) - if not router_id: - router_id = _create_router(module, neutron) - module.exit_json(changed=True, result="Created", id=router_id) - else: - module.exit_json(changed=False, result="success" , id=router_id) - - else: - router_id = _get_router_id(module, neutron) - if not router_id: - module.exit_json(changed=False, result="success") - else: - _delete_router(module, neutron, router_id) - module.exit_json(changed=True, result="deleted") - -# this is magic, see lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/quantum_router_gateway b/library/cloud/quantum_router_gateway deleted file mode 100644 index 5de19fd4785..00000000000 --- a/library/cloud/quantum_router_gateway +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -try: - try: - from neutronclient.neutron import client - except ImportError: - from quantumclient.quantum import client - from keystoneclient.v2_0 import client as ksclient -except ImportError: - print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'") -DOCUMENTATION = ''' ---- -module: quantum_router_gateway -version_added: "1.2" -short_description: set/unset a gateway interface for the router with the specified external network -description: - - Creates/Removes a gateway interface from the router, used to associate a external network with a router to route external traffic. 
-options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - auth_url: - description: - - The keystone URL for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - router_name: - description: - - Name of the router to which the gateway should be attached. - required: true - default: None - network_name: - description: - - Name of the external network which should be attached to the router. - required: true - default: None -requirements: ["quantumclient", "neutronclient", "keystoneclient"] -''' - -EXAMPLES = ''' -# Attach an external network with a router to allow flow of external traffic -- quantum_router_gateway: state=present login_username=admin login_password=admin - login_tenant_name=admin router_name=external_router - network_name=external_network -''' - -_os_keystone = None -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) - global _os_keystone - _os_keystone = kclient - return kclient - - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s" % e.message) - return endpoint - -def _get_neutron_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient) - kwargs = { - 'token': token, - 'endpoint_url': endpoint - } - try: - neutron = client.Client('2.0', **kwargs) - except Exception, e: - module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) - return neutron - -def _get_router_id(module, neutron): - kwargs = { - 'name': module.params['router_name'], - } - try: - routers = neutron.list_routers(**kwargs) - except Exception, e: - module.fail_json(msg = "Error in getting the router list: %s " % e.message) - if not routers['routers']: - return None - return routers['routers'][0]['id'] - -def _get_net_id(neutron, module): - kwargs = { - 'name': module.params['network_name'], - 'router:external': True - } - try: - networks = neutron.list_networks(**kwargs) - except Exception, e: - module.fail_json("Error in listing neutron networks: %s" % e.message) - if not networks['networks']: - return None - return networks['networks'][0]['id'] - -def _get_port_id(neutron, module, router_id, network_id): - kwargs = { - 'device_id': router_id, - 'network_id': network_id, - } - try: - ports = neutron.list_ports(**kwargs) - except Exception, e: - module.fail_json( msg = "Error in listing ports: %s" % e.message) - if not ports['ports']: - return None - return ports['ports'][0]['id'] - -def _add_gateway_router(neutron, module, router_id, network_id): - kwargs = { - 'network_id': network_id - } - try: - neutron.add_gateway_router(router_id, kwargs) - except Exception, 
e: - module.fail_json(msg = "Error in adding gateway to router: %s" % e.message) - return True - -def _remove_gateway_router(neutron, module, router_id): - try: - neutron.remove_gateway_router(router_id) - except Exception, e: - module.fail_json(msg = "Error in removing gateway to router: %s" % e.message) - return True - -def main(): - - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - router_name = dict(required=True), - network_name = dict(required=True), - state = dict(default='present', choices=['absent', 'present']), - )) - module = AnsibleModule(argument_spec=argument_spec) - - neutron = _get_neutron_client(module, module.params) - router_id = _get_router_id(module, neutron) - - if not router_id: - module.fail_json(msg="failed to get the router id, please check the router name") - - network_id = _get_net_id(neutron, module) - if not network_id: - module.fail_json(msg="failed to get the network id, please check the network name and make sure it is external") - - if module.params['state'] == 'present': - port_id = _get_port_id(neutron, module, router_id, network_id) - if not port_id: - _add_gateway_router(neutron, module, router_id, network_id) - module.exit_json(changed=True, result="created") - module.exit_json(changed=False, result="success") - - if module.params['state'] == 'absent': - port_id = _get_port_id(neutron, module, router_id, network_id) - if not port_id: - module.exit_json(changed=False, result="Success") - _remove_gateway_router(neutron, module, router_id) - module.exit_json(changed=True, result="Deleted") - -# this is magic, see lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/quantum_router_interface b/library/cloud/quantum_router_interface deleted file mode 100644 index c5828ad4106..00000000000 --- a/library/cloud/quantum_router_interface +++ /dev/null @@ -1,249 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -try: - try: - from neutronclient.neutron import client - except ImportError: - from quantumclient.quantum import client - from keystoneclient.v2_0 import client as ksclient -except ImportError: - print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'") -DOCUMENTATION = ''' ---- -module: quantum_router_interface -version_added: "1.2" -short_description: Attach/Dettach a subnet's interface to a router -description: - - Attach/Dettach a subnet interface to a router, to provide a gateway for the subnet. 
-options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - auth_url: - description: - - The keystone URL for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - router_name: - description: - - Name of the router to which the subnet's interface should be attached. - required: true - default: None - subnet_name: - description: - - Name of the subnet to whose interface should be attached to the router. - required: true - default: None - tenant_name: - description: - - Name of the tenant whose subnet has to be attached. - required: false - default: None -requirements: ["quantumclient", "keystoneclient"] -''' - -EXAMPLES = ''' -# Attach tenant1's subnet to the external router -- quantum_router_interface: state=present login_username=admin - login_password=admin - login_tenant_name=admin - tenant_name=tenant1 - router_name=external_route - subnet_name=t1subnet -''' - - -_os_keystone = None -_os_tenant_id = None - -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) - global _os_keystone - _os_keystone = kclient - return kclient - - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s" % e.message) - return endpoint - -def _get_neutron_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient) - kwargs = { - 'token': token, - 'endpoint_url': endpoint - } - try: - neutron = client.Client('2.0', **kwargs) - except Exception, e: - module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) - return neutron - -def _set_tenant_id(module): - global _os_tenant_id - if not module.params['tenant_name']: - login_tenant_name = module.params['login_tenant_name'] - else: - login_tenant_name = module.params['tenant_name'] - - for tenant in _os_keystone.tenants.list(): - if tenant.name == login_tenant_name: - _os_tenant_id = tenant.id - break - if not _os_tenant_id: - module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - - -def _get_router_id(module, neutron): - kwargs = { - 'name': module.params['router_name'], - } - try: - routers = neutron.list_routers(**kwargs) - except Exception, e: - module.fail_json(msg = "Error in getting the router list: %s " % e.message) - if not routers['routers']: - return None - return routers['routers'][0]['id'] - - -def _get_subnet_id(module, neutron): - subnet_id = None - kwargs = { - 'tenant_id': _os_tenant_id, - 'name': module.params['subnet_name'], - } - try: - subnets = neutron.list_subnets(**kwargs) - except Exception, e: - module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) - if 
not subnets['subnets']: - return None - return subnets['subnets'][0]['id'] - -def _get_port_id(neutron, module, router_id, subnet_id): - kwargs = { - 'tenant_id': _os_tenant_id, - 'device_id': router_id, - } - try: - ports = neutron.list_ports(**kwargs) - except Exception, e: - module.fail_json( msg = "Error in listing ports: %s" % e.message) - if not ports['ports']: - return None - for port in ports['ports']: - for subnet in port['fixed_ips']: - if subnet['subnet_id'] == subnet_id: - return port['id'] - return None - -def _add_interface_router(neutron, module, router_id, subnet_id): - kwargs = { - 'subnet_id': subnet_id - } - try: - neutron.add_interface_router(router_id, kwargs) - except Exception, e: - module.fail_json(msg = "Error in adding interface to router: %s" % e.message) - return True - -def _remove_interface_router(neutron, module, router_id, subnet_id): - kwargs = { - 'subnet_id': subnet_id - } - try: - neutron.remove_interface_router(router_id, kwargs) - except Exception, e: - module.fail_json(msg="Error in removing interface from router: %s" % e.message) - return True - -def main(): - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - router_name = dict(required=True), - subnet_name = dict(required=True), - tenant_name = dict(default=None), - state = dict(default='present', choices=['absent', 'present']), - )) - module = AnsibleModule(argument_spec=argument_spec) - - neutron = _get_neutron_client(module, module.params) - _set_tenant_id(module) - - router_id = _get_router_id(module, neutron) - if not router_id: - module.fail_json(msg="failed to get the router id, please check the router name") - - subnet_id = _get_subnet_id(module, neutron) - if not subnet_id: - module.fail_json(msg="failed to get the subnet id, please check the subnet name") - - if module.params['state'] == 'present': - port_id = _get_port_id(neutron, module, router_id, subnet_id) - if not port_id: - _add_interface_router(neutron, module, router_id, subnet_id) - module.exit_json(changed=True, result="created", id=port_id) - module.exit_json(changed=False, result="success", id=port_id) - - if module.params['state'] == 'absent': - port_id = _get_port_id(neutron, module, router_id, subnet_id) - if not port_id: - module.exit_json(changed = False, result = "Success") - _remove_interface_router(neutron, module, router_id, subnet_id) - module.exit_json(changed=True, result="Deleted") - -# this is magic, see lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/quantum_subnet b/library/cloud/quantum_subnet deleted file mode 100644 index e38b2c94aa6..00000000000 --- a/library/cloud/quantum_subnet +++ /dev/null @@ -1,291 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
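The router-interface module above gets its idempotency from a port lookup: the interface already exists if one of the router's ports has a fixed IP on the target subnet. A minimal sketch of that check-then-attach logic, following _get_port_id and _add_interface_router (ensure_interface is an illustrative name):

    # Sketch of the idempotency check in quantum_router_interface: attach
    # only when no router port already sits on the subnet.
    def ensure_interface(neutron, router_id, subnet_id, tenant_id):
        ports = neutron.list_ports(tenant_id=tenant_id, device_id=router_id)
        for port in ports['ports']:
            if any(ip['subnet_id'] == subnet_id for ip in port['fixed_ips']):
                return False  # already attached, no change
        neutron.add_interface_router(router_id, {'subnet_id': subnet_id})
        return True  # changed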
- -try: - try: - from neutronclient.neutron import client - except ImportError: - from quantumclient.quantum import client - from keystoneclient.v2_0 import client as ksclient -except ImportError: - print("failed=True msg='quantumclient (or neutronclient) and keystoneclient are required'") - -DOCUMENTATION = ''' ---- -module: quantum_subnet -version_added: "1.2" -short_description: Add/remove subnet from a network -description: - - Add/remove subnet from a network -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: True - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: True - auth_url: - description: - - The keystone URL for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - network_name: - description: - - Name of the network to which the subnet should be attached - required: true - default: None - name: - description: - - The name of the subnet that should be created - required: true - default: None - cidr: - description: - - The CIDR representation of the subnet that should be assigned to the subnet - required: true - default: None - tenant_name: - description: - - The name of the tenant for whom the subnet should be created - required: false - default: None - ip_version: - description: - - The IP version of the subnet 4 or 6 - required: false - default: 4 - enable_dhcp: - description: - - Whether DHCP should be enabled for this subnet. 
- required: false - default: true - gateway_ip: - description: - - The ip that would be assigned to the gateway for this subnet - required: false - default: None - dns_nameservers: - description: - - DNS nameservers for this subnet, comma-separated - required: false - default: None - version_added: "1.4" - allocation_pool_start: - description: - - From the subnet pool the starting address from which the IP should be allocated - required: false - default: None - allocation_pool_end: - description: - - From the subnet pool the last IP that should be assigned to the virtual machines - required: false - default: None -requirements: ["quantumclient", "neutronclient", "keystoneclient"] -''' - -EXAMPLES = ''' -# Create a subnet for a tenant with the specified subnet -- quantum_subnet: state=present login_username=admin login_password=admin - login_tenant_name=admin tenant_name=tenant1 - network_name=network1 name=net1subnet cidr=192.168.0.0/24" -''' - -_os_keystone = None -_os_tenant_id = None -_os_network_id = None - -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message) - global _os_keystone - _os_keystone = kclient - return kclient - - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s" % e.message) - return endpoint - -def _get_neutron_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient) - kwargs = { - 'token': token, - 'endpoint_url': endpoint - } - try: - neutron = client.Client('2.0', **kwargs) - except Exception, e: - module.fail_json(msg = " Error in connecting to neutron: %s" % e.message) - return neutron - -def _set_tenant_id(module): - global _os_tenant_id - if not module.params['tenant_name']: - tenant_name = module.params['login_tenant_name'] - else: - tenant_name = module.params['tenant_name'] - - for tenant in _os_keystone.tenants.list(): - if tenant.name == tenant_name: - _os_tenant_id = tenant.id - break - if not _os_tenant_id: - module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - -def _get_net_id(neutron, module): - kwargs = { - 'tenant_id': _os_tenant_id, - 'name': module.params['network_name'], - } - try: - networks = neutron.list_networks(**kwargs) - except Exception, e: - module.fail_json("Error in listing neutron networks: %s" % e.message) - if not networks['networks']: - return None - return networks['networks'][0]['id'] - - -def _get_subnet_id(module, neutron): - global _os_network_id - subnet_id = None - _os_network_id = _get_net_id(neutron, module) - if not _os_network_id: - module.fail_json(msg = "network id of network not found.") - else: - kwargs = { - 'tenant_id': _os_tenant_id, - 'name': module.params['name'], - } - try: - subnets = neutron.list_subnets(**kwargs) - except Exception, e: - module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) - if not subnets['subnets']: - return None - return subnets['subnets'][0]['id'] - -def _create_subnet(module, neutron): - neutron.format = 'json' - subnet = { - 'name': module.params['name'], - 'ip_version': 
module.params['ip_version'], - 'enable_dhcp': module.params['enable_dhcp'], - 'tenant_id': _os_tenant_id, - 'gateway_ip': module.params['gateway_ip'], - 'dns_nameservers': module.params['dns_nameservers'], - 'network_id': _os_network_id, - 'cidr': module.params['cidr'], - } - if module.params['allocation_pool_start'] and module.params['allocation_pool_end']: - allocation_pools = [ - { - 'start' : module.params['allocation_pool_start'], - 'end' : module.params['allocation_pool_end'] - } - ] - subnet.update({'allocation_pools': allocation_pools}) - if not module.params['gateway_ip']: - subnet.pop('gateway_ip') - if module.params['dns_nameservers']: - subnet['dns_nameservers'] = module.params['dns_nameservers'].split(',') - else: - subnet.pop('dns_nameservers') - try: - new_subnet = neutron.create_subnet(dict(subnet=subnet)) - except Exception, e: - module.fail_json(msg = "Failure in creating subnet: %s" % e.message) - return new_subnet['subnet']['id'] - - -def _delete_subnet(module, neutron, subnet_id): - try: - neutron.delete_subnet(subnet_id) - except Exception, e: - module.fail_json(msg = "Error in deleting subnet: %s" % e.message) - return True - - -def main(): - - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - name = dict(required=True), - network_name = dict(required=True), - cidr = dict(required=True), - tenant_name = dict(default=None), - state = dict(default='present', choices=['absent', 'present']), - ip_version = dict(default='4', choices=['4', '6']), - enable_dhcp = dict(default='true', type='bool'), - gateway_ip = dict(default=None), - dns_nameservers = dict(default=None), - allocation_pool_start = dict(default=None), - allocation_pool_end = dict(default=None), - )) - module = AnsibleModule(argument_spec=argument_spec) - neutron = _get_neutron_client(module, module.params) - _set_tenant_id(module) - if module.params['state'] == 'present': - subnet_id = _get_subnet_id(module, neutron) - if not subnet_id: - subnet_id = _create_subnet(module, neutron) - module.exit_json(changed = True, result = "Created" , id = subnet_id) - else: - module.exit_json(changed = False, result = "success" , id = subnet_id) - else: - subnet_id = _get_subnet_id(module, neutron) - if not subnet_id: - module.exit_json(changed = False, result = "success") - else: - _delete_subnet(module, neutron, subnet_id) - module.exit_json(changed = True, result = "deleted") - -# this is magic, see lib/ansible/module_common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/rax b/library/cloud/rax deleted file mode 100644 index d7db2c63d7e..00000000000 --- a/library/cloud/rax +++ /dev/null @@ -1,711 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
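For reference, the quantum_subnet module above wraps a three-step OpenStack client flow: authenticate to keystone, resolve the network service endpoint from the service catalog, then build a neutron client from the scoped token. A minimal standalone sketch of that flow, assuming a keystone v2.0 endpoint and placeholder credentials:

    from keystoneclient.v2_0 import client as ksclient
    from neutronclient.neutron import client as neutron_client

    # Step 1: authenticate to keystone (all credentials are placeholders)
    ks = ksclient.Client(username='admin', password='secret',
                         tenant_name='admin',
                         auth_url='http://127.0.0.1:35357/v2.0/')

    # Step 2: look up the neutron endpoint in the service catalog
    endpoint = ks.service_catalog.url_for(service_type='network',
                                          endpoint_type='publicURL')

    # Step 3: build a neutron client from the token and endpoint
    neutron = neutron_client.Client('2.0', token=ks.auth_token,
                                    endpoint_url=endpoint)
    print(neutron.list_networks())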
- -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax -short_description: create / delete an instance in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud instance and optionally - waits for it to be 'running'. -version_added: "1.2" -options: - auto_increment: - description: - - Whether or not to increment a single number with the name of the - created servers. Only applicable when used with the I(group) attribute - or meta key. - default: yes - choices: - - "yes" - - "no" - version_added: 1.5 - config_drive: - description: - - Attach read-only configuration drive to server as label config-2 - default: no - choices: - - "yes" - - "no" - version_added: 1.7 - count: - description: - - number of instances to launch - default: 1 - version_added: 1.4 - count_offset: - description: - - number count to start at - default: 1 - version_added: 1.4 - disk_config: - description: - - Disk partitioning strategy - choices: - - auto - - manual - version_added: '1.4' - default: auto - exact_count: - description: - - Explicitly ensure an exact count of instances, used with - state=active/present - default: no - choices: - - "yes" - - "no" - version_added: 1.4 - extra_client_args: - description: - - A hash of key/value pairs to be used when creating the cloudservers - client. This is considered an advanced option, use it wisely and - with caution. - version_added: 1.6 - extra_create_args: - description: - - A hash of key/value pairs to be used when creating a new server. - This is considered an advanced option, use it wisely and with caution. - version_added: 1.6 - files: - description: - - Files to insert into the instance. remotefilename:localcontent - default: null - flavor: - description: - - flavor to use for the instance - default: null - group: - description: - - host group to assign to server, is also used for idempotent operations - to ensure a specific number of instances - version_added: 1.4 - image: - description: - - image to use for the instance. Can be an C(id), C(human_id) or C(name) - default: null - instance_ids: - description: - - list of instance ids, currently only used when state='absent' to - remove instances - version_added: 1.4 - key_name: - description: - - key pair to use on the instance - default: null - aliases: - - keypair - meta: - description: - - A hash of metadata to associate with the instance - default: null - name: - description: - - Name to give the instance - default: null - networks: - description: - - The network to attach to the instances. If specified, you must include - ALL networks including the public and private interfaces. Can be C(id) - or C(label). - default: - - public - - private - version_added: 1.4 - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - user_data: - description: - - Data to be uploaded to the servers config drive. This option implies - I(config_drive). 
Can be a file path or a string - version_added: 1.7 - wait: - description: - - wait for the instance to be in state 'running' before returning - default: "no" - choices: - - "yes" - - "no" - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 -author: Jesse Keating, Matt Martz -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Build a Cloud Server - gather_facts: False - tasks: - - name: Server build request - local_action: - module: rax - credentials: ~/.raxpub - name: rax-test1 - flavor: 5 - image: b11d9567-e412-4255-96b9-bd63ab23bcfe - files: - /root/.ssh/authorized_keys: /home/localuser/.ssh/id_rsa.pub - /root/test.txt: /home/localuser/test.txt - wait: yes - state: present - networks: - - private - - public - register: rax - -- name: Build an exact count of cloud servers with incremented names - hosts: local - gather_facts: False - tasks: - - name: Server build requests - local_action: - module: rax - credentials: ~/.raxpub - name: test%03d.example.org - flavor: performance1-1 - image: ubuntu-1204-lts-precise-pangolin - state: present - count: 10 - count_offset: 10 - exact_count: yes - group: test - wait: yes - register: rax -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def create(module, names=[], flavor=None, image=None, meta={}, key_name=None, - files={}, wait=True, wait_timeout=300, disk_config=None, - group=None, nics=[], extra_create_args={}, user_data=None, - config_drive=False, existing=[]): - cs = pyrax.cloudservers - changed = False - - if user_data: - config_drive = True - - if user_data and os.path.isfile(user_data): - try: - f = open(user_data) - user_data = f.read() - f.close() - except Exception, e: - module.fail_json(msg='Failed to load %s' % user_data) - - # Handle the file contents - for rpath in files.keys(): - lpath = os.path.expanduser(files[rpath]) - try: - fileobj = open(lpath, 'r') - files[rpath] = fileobj.read() - fileobj.close() - except Exception, e: - module.fail_json(msg='Failed to load %s' % lpath) - try: - servers = [] - for name in names: - servers.append(cs.servers.create(name=name, image=image, - flavor=flavor, meta=meta, - key_name=key_name, - files=files, nics=nics, - disk_config=disk_config, - config_drive=config_drive, - userdata=user_data, - **extra_create_args)) - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - for server in servers: - try: - server.get() - except: - server.status == 'ERROR' - - if not filter(lambda s: s.status not in FINAL_STATUSES, - servers): - break - time.sleep(5) - - success = [] - error = [] - timeout = [] - for server in servers: - try: - server.get() - except: - server.status == 'ERROR' - instance = rax_to_dict(server, 'server') - if server.status == 'ACTIVE' or not wait: - success.append(instance) - elif server.status == 'ERROR': - error.append(instance) - elif wait: - timeout.append(instance) - - untouched = [rax_to_dict(s, 'server') for s in existing] - instances = success + untouched - - results = { - 'changed': changed, - 'action': 'create', - 'instances': instances, - 'success': success, - 'error': error, - 'timeout': timeout, - 'instance_ids': { - 'instances': [i['id'] for i in instances], - 'success': [i['id'] for i in success], - 'error': [i['id'] for i in error], - 'timeout': [i['id'] for i in timeout] - } - } - - if timeout: - 
results['msg'] = 'Timeout waiting for all servers to build' - elif error: - results['msg'] = 'Failed to build all servers' - - if 'msg' in results: - module.fail_json(**results) - else: - module.exit_json(**results) - - -def delete(module, instance_ids=[], wait=True, wait_timeout=300, kept=[]): - cs = pyrax.cloudservers - - changed = False - instances = {} - servers = [] - - for instance_id in instance_ids: - servers.append(cs.servers.get(instance_id)) - - for server in servers: - try: - server.delete() - except Exception, e: - module.fail_json(msg=e.message) - else: - changed = True - - instance = rax_to_dict(server, 'server') - instances[instance['id']] = instance - - # If requested, wait for server deletion - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - for server in servers: - instance_id = server.id - try: - server.get() - except: - instances[instance_id]['status'] = 'DELETED' - instances[instance_id]['rax_status'] = 'DELETED' - - if not filter(lambda s: s['status'] not in ('', 'DELETED', - 'ERROR'), - instances.values()): - break - - time.sleep(5) - - timeout = filter(lambda s: s['status'] not in ('', 'DELETED', 'ERROR'), - instances.values()) - error = filter(lambda s: s['status'] in ('ERROR'), - instances.values()) - success = filter(lambda s: s['status'] in ('', 'DELETED'), - instances.values()) - - instances = [rax_to_dict(s, 'server') for s in kept] - - results = { - 'changed': changed, - 'action': 'delete', - 'instances': instances, - 'success': success, - 'error': error, - 'timeout': timeout, - 'instance_ids': { - 'instances': [i['id'] for i in instances], - 'success': [i['id'] for i in success], - 'error': [i['id'] for i in error], - 'timeout': [i['id'] for i in timeout] - } - } - - if timeout: - results['msg'] = 'Timeout waiting for all servers to delete' - elif error: - results['msg'] = 'Failed to delete all servers' - - if 'msg' in results: - module.fail_json(**results) - else: - module.exit_json(**results) - - -def cloudservers(module, state=None, name=None, flavor=None, image=None, - meta={}, key_name=None, files={}, wait=True, wait_timeout=300, - disk_config=None, count=1, group=None, instance_ids=[], - exact_count=False, networks=[], count_offset=0, - auto_increment=False, extra_create_args={}, user_data=None, - config_drive=False): - cs = pyrax.cloudservers - cnw = pyrax.cloud_networks - if not cnw: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - servers = [] - - # Add the group meta key - if group and 'group' not in meta: - meta['group'] = group - elif 'group' in meta and group is None: - group = meta['group'] - - # Normalize and ensure all metadata values are strings - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, basestring): - meta[k] = '%s' % v - - # When using state=absent with group, the absent block won't match the - # names properly. 
Use the exact_count functionality to decrease the count - # to the desired level - was_absent = False - if group is not None and state == 'absent': - exact_count = True - state = 'present' - was_absent = True - - if image: - image = rax_find_image(module, pyrax, image) - - nics = [] - if networks: - for network in networks: - nics.extend(rax_find_network(module, pyrax, network)) - - # act on the state - if state == 'present': - for arg, value in dict(name=name, flavor=flavor, - image=image).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax" module' % - arg) - - # Idempotent ensurance of a specific count of servers - if exact_count is not False: - # See if we can find servers that match our options - if group is None: - module.fail_json(msg='"group" must be provided when using ' - '"exact_count"') - else: - if auto_increment: - numbers = set() - - try: - name % 0 - except TypeError, e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message) - - pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) - for server in cs.servers.list(): - if server.metadata.get('group') == group: - servers.append(server) - match = re.search(pattern, server.name) - if match: - number = int(match.group(1)) - numbers.add(number) - - number_range = xrange(count_offset, count_offset + count) - available_numbers = list(set(number_range) - .difference(numbers)) - else: - for server in cs.servers.list(): - if server.metadata.get('group') == group: - servers.append(server) - - # If state was absent but the count was changed, - # assume we only wanted to remove that number of instances - if was_absent: - diff = len(servers) - count - if diff < 0: - count = 0 - else: - count = diff - - if len(servers) > count: - state = 'absent' - kept = servers[:count] - del servers[:count] - instance_ids = [] - for server in servers: - instance_ids.append(server.id) - delete(module, instance_ids=instance_ids, wait=wait, - wait_timeout=wait_timeout, kept=kept) - elif len(servers) < count: - if auto_increment: - names = [] - name_slice = count - len(servers) - numbers_to_use = available_numbers[:name_slice] - for number in numbers_to_use: - names.append(name % number) - else: - names = [name] * (count - len(servers)) - else: - instances = [] - instance_ids = [] - for server in servers: - instances.append(rax_to_dict(server, 'server')) - instance_ids.append(server.id) - module.exit_json(changed=False, action=None, - instances=instances, - success=[], error=[], timeout=[], - instance_ids={'instances': instance_ids, - 'success': [], 'error': [], - 'timeout': []}) - else: - if group is not None: - if auto_increment: - numbers = set() - - try: - name % 0 - except TypeError, e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message) - - pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) - for server in cs.servers.list(): - if server.metadata.get('group') == group: - servers.append(server) - match = re.search(pattern, server.name) - if match: - number = int(match.group(1)) - numbers.add(number) - - number_range = xrange(count_offset, - count_offset + count + len(numbers)) - available_numbers = list(set(number_range) - .difference(numbers)) - names = [] - numbers_to_use = available_numbers[:count] - for number in numbers_to_use: - names.append(name % number) - else: - names = [name] * count - else: - search_opts = { - 'name': '^%s$' % name, - 'image': image, - 'flavor': flavor - } - servers = [] - for server in 
cs.servers.list(search_opts=search_opts): - if server.metadata != meta: - continue - servers.append(server) - - if len(servers) >= count: - instances = [] - for server in servers: - instances.append(rax_to_dict(server, 'server')) - - instance_ids = [i['id'] for i in instances] - module.exit_json(changed=False, action=None, - instances=instances, success=[], error=[], - timeout=[], - instance_ids={'instances': instance_ids, - 'success': [], 'error': [], - 'timeout': []}) - - names = [name] * (count - len(servers)) - - create(module, names=names, flavor=flavor, image=image, - meta=meta, key_name=key_name, files=files, wait=wait, - wait_timeout=wait_timeout, disk_config=disk_config, group=group, - nics=nics, extra_create_args=extra_create_args, - user_data=user_data, config_drive=config_drive, - existing=servers) - - elif state == 'absent': - if instance_ids is None: - for arg, value in dict(name=name, flavor=flavor, - image=image).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax" ' - 'module' % arg) - search_opts = { - 'name': '^%s$' % name, - 'image': image, - 'flavor': flavor - } - for server in cs.servers.list(search_opts=search_opts): - if meta != server.metadata: - continue - servers.append(server) - - instance_ids = [] - for server in servers: - if len(instance_ids) < count: - instance_ids.append(server.id) - else: - break - - if not instance_ids: - module.exit_json(changed=False, action=None, instances=[], - success=[], error=[], timeout=[], - instance_ids={'instances': [], - 'success': [], 'error': [], - 'timeout': []}) - - delete(module, instance_ids=instance_ids, wait=wait, - wait_timeout=wait_timeout) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - auto_increment=dict(default=True, type='bool'), - config_drive=dict(default=False, type='bool'), - count=dict(default=1, type='int'), - count_offset=dict(default=1, type='int'), - disk_config=dict(choices=['auto', 'manual']), - exact_count=dict(default=False, type='bool'), - extra_client_args=dict(type='dict', default={}), - extra_create_args=dict(type='dict', default={}), - files=dict(type='dict', default={}), - flavor=dict(), - group=dict(), - image=dict(), - instance_ids=dict(type='list'), - key_name=dict(aliases=['keypair']), - meta=dict(type='dict', default={}), - name=dict(), - networks=dict(type='list', default=['public', 'private']), - service=dict(), - state=dict(default='present', choices=['present', 'absent']), - user_data=dict(no_log=True), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - service = module.params.get('service') - - if service is not None: - module.fail_json(msg='The "service" attribute has been deprecated, ' - 'please remove "service: cloudservers" from your ' - 'playbook pertaining to the "rax" module') - - auto_increment = module.params.get('auto_increment') - config_drive = module.params.get('config_drive') - count = module.params.get('count') - count_offset = module.params.get('count_offset') - disk_config = module.params.get('disk_config') - if disk_config: - disk_config = disk_config.upper() - exact_count = module.params.get('exact_count', False) - extra_client_args = module.params.get('extra_client_args') - extra_create_args = module.params.get('extra_create_args') - files = module.params.get('files') - 
flavor = module.params.get('flavor') - group = module.params.get('group') - image = module.params.get('image') - instance_ids = module.params.get('instance_ids') - key_name = module.params.get('key_name') - meta = module.params.get('meta') - name = module.params.get('name') - networks = module.params.get('networks') - state = module.params.get('state') - user_data = module.params.get('user_data') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - setup_rax_module(module, pyrax) - - if extra_client_args: - pyrax.cloudservers = pyrax.connect_to_cloudservers( - region=pyrax.cloudservers.client.region_name, - **extra_client_args) - client = pyrax.cloudservers.client - if 'bypass_url' in extra_client_args: - client.management_url = extra_client_args['bypass_url'] - - if pyrax.cloudservers is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - cloudservers(module, state=state, name=name, flavor=flavor, - image=image, meta=meta, key_name=key_name, files=files, - wait=wait, wait_timeout=wait_timeout, disk_config=disk_config, - count=count, group=group, instance_ids=instance_ids, - exact_count=exact_count, networks=networks, - count_offset=count_offset, auto_increment=auto_increment, - extra_create_args=extra_create_args, user_data=user_data, - config_drive=config_drive) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -# invoke the module -main() diff --git a/library/cloud/rax_cbs b/library/cloud/rax_cbs deleted file mode 100644 index a1b6ce46a6e..00000000000 --- a/library/cloud/rax_cbs +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
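The rax module above is, at its core, orchestration around a few pyrax calls. A minimal sketch of the underlying boot-and-wait pattern it implements, assuming a credentials file at ~/.raxpub and placeholder image/flavor values:

    import os
    import pyrax

    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_credential_file(os.path.expanduser('~/.raxpub'))  # placeholder path
    cs = pyrax.cloudservers

    server = cs.servers.create(name='rax-test1',
                               image='b11d9567-e412-4255-96b9-bd63ab23bcfe',
                               flavor='performance1-1')
    # Poll until the server leaves the BUILD state, as the module's wait loop does
    server = pyrax.utils.wait_for_build(server, interval=5, attempts=60)
    print(server.status)

The module layers its idempotency features (exact_count, group metadata, name auto-increment) on top of this primitive.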
- -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_cbs -short_description: Manipulate Rackspace Cloud Block Storage Volumes -description: - - Manipulate Rackspace Cloud Block Storage Volumes -version_added: 1.6 -options: - description: - description: - - Description to give the volume being created - default: null - meta: - description: - - A hash of metadata to associate with the volume - default: null - name: - description: - - Name to give the volume being created - default: null - required: true - size: - description: - - Size of the volume to create in Gigabytes - default: 100 - required: true - snapshot_id: - description: - - The id of the snapshot to create the volume from - default: null - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - required: true - volume_type: - description: - - Type of the volume being created - choices: - - SATA - - SSD - default: SATA - required: true - wait: - description: - - wait for the volume to be in state 'available' before returning - default: "no" - choices: - - "yes" - - "no" - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 -author: Christopher H. Laco, Matt Martz -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Build a Block Storage Volume - gather_facts: False - hosts: local - connection: local - tasks: - - name: Storage volume create request - local_action: - module: rax_cbs - credentials: ~/.raxpub - name: my-volume - description: My Volume - volume_type: SSD - size: 150 - region: DFW - wait: yes - state: present - meta: - app: my-cool-app - register: my_volume -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout): - for arg in (state, name, size, volume_type): - if not arg: - module.fail_json(msg='%s is required for rax_cbs' % arg) - - if size < 100: - module.fail_json(msg='"size" must be greater than or equal to 100') - - changed = False - volume = None - instance = {} - - cbs = pyrax.cloud_blockstorage - - if cbs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - volume = rax_find_volume(module, pyrax, name) - - if state == 'present': - if not volume: - try: - volume = cbs.create(name, size=size, volume_type=volume_type, - description=description, - metadata=meta, - snapshot_id=snapshot_id) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - if wait: - attempts = wait_timeout / 5 - pyrax.utils.wait_for_build(volume, interval=5, - attempts=attempts) - - volume.get() - for key, value in vars(volume).iteritems(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value - - result = dict(changed=changed, volume=instance) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - elif wait and volume.status not in VOLUME_STATUS: - result['msg'] = 'Timeout waiting on %s' % volume.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - if volume: - try: - volume.delete() - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, volume=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - description=dict(), - meta=dict(type='dict', default={}), - name=dict(required=True), - size=dict(type='int', default=100), - snapshot_id=dict(), - state=dict(default='present', choices=['present', 'absent']), - volume_type=dict(choices=['SSD', 'SATA'], default='SATA'), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - description = module.params.get('description') - meta = module.params.get('meta') - name = module.params.get('name') - size = module.params.get('size') - snapshot_id = module.params.get('snapshot_id') - state = module.params.get('state') - volume_type = module.params.get('volume_type') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_cbs_attachments b/library/cloud/rax_cbs_attachments deleted file mode 100644 index 365f93cd6e2..00000000000 --- a/library/cloud/rax_cbs_attachments +++ /dev/null @@ -1,226 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
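The volume lifecycle handled by the rax_cbs module above, and by the rax_cbs_attachments module whose removal starts just below, reduces to a handful of pyrax calls. A sketch with placeholder names, assuming pyrax is authenticated as in the earlier server example:

    import pyrax  # assumes credentials configured as in the earlier sketch

    cbs = pyrax.cloud_blockstorage
    cs = pyrax.cloudservers

    # rax_cbs: create a volume and wait for it to finish building
    volume = cbs.create('my-volume', size=150, volume_type='SSD',
                        description='My Volume')
    pyrax.utils.wait_for_build(volume, interval=5, attempts=60)

    # rax_cbs_attachments: attach it to a server and wait for 'in-use'
    server = cs.servers.find(name='my-server')
    volume.attach_to_instance(server, mountpoint='/dev/xvdd')
    pyrax.utils.wait_until(volume, 'status', 'in-use', interval=5, attempts=60)

    # ...and detaching waits for the volume to become 'available' again
    volume.detach()
    pyrax.utils.wait_until(volume, 'status', 'available', interval=3, attempts=0)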
- -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_cbs_attachments -short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments -description: - - Manipulate Rackspace Cloud Block Storage Volume Attachments -version_added: 1.6 -options: - device: - description: - - The device path to attach the volume to, e.g. /dev/xvde - default: null - required: true - volume: - description: - - Name or id of the volume to attach/detach - default: null - required: true - server: - description: - - Name or id of the server to attach/detach - default: null - required: true - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - required: true - wait: - description: - - wait for the volume to be in 'in-use'/'available' state before returning - default: "no" - choices: - - "yes" - - "no" - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 -author: Christopher H. Laco, Matt Martz -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Attach a Block Storage Volume - gather_facts: False - hosts: local - connection: local - tasks: - - name: Storage volume attach request - local_action: - module: rax_cbs_attachments - credentials: ~/.raxpub - volume: my-volume - server: my-server - device: /dev/xvdd - region: DFW - wait: yes - state: present - register: my_volume -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def cloud_block_storage_attachments(module, state, volume, server, device, - wait, wait_timeout): - for arg in (state, volume, server, device): - if not arg: - module.fail_json(msg='%s is required for rax_cbs_attachments' % - arg) - - cbs = pyrax.cloud_blockstorage - cs = pyrax.cloudservers - - if cbs is None or cs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - changed = False - instance = {} - - volume = rax_find_volume(module, pyrax, volume) - - if not volume: - module.fail_json(msg='No matching storage volumes were found') - - if state == 'present': - server = rax_find_server(module, pyrax, server) - - if (volume.attachments and - volume.attachments[0]['server_id'] == server.id): - changed = False - elif volume.attachments: - module.fail_json(msg='Volume is attached to another server') - else: - try: - volume.attach_to_instance(server, mountpoint=device) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - volume.get() - - for key, value in vars(volume).iteritems(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value - - result = dict(changed=changed, volume=instance) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - elif wait: - attempts = wait_timeout / 5 - pyrax.utils.wait_until(volume, 'status', 'in-use', - interval=5, attempts=attempts) - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - server = rax_find_server(module, pyrax, server) - - if (volume.attachments and - volume.attachments[0]['server_id'] == server.id): - try: - volume.detach() - if wait: - pyrax.utils.wait_until(volume, 'status', 'available', - interval=3, attempts=0, - verbose=False) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - volume.get() - changed = True - elif volume.attachments: - module.fail_json(msg='Volume is attached to another server') - - for key, value in vars(volume).iteritems(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value - - result = dict(changed=changed, volume=instance) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - module.exit_json(changed=changed, volume=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - device=dict(required=True), - volume=dict(required=True), - server=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - device = module.params.get('device') - volume = module.params.get('volume') - server = module.params.get('server') - state = module.params.get('state') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_block_storage_attachments(module, state, volume, server, device, - wait, wait_timeout) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_cdb b/library/cloud/rax_cdb deleted file mode 100644 index 55e486f79e5..00000000000 --- a/library/cloud/rax_cdb +++ /dev/null @@ -1,238 +0,0 @@ -#!/usr/bin/python -tt -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free 
Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_cdb -short_description: create/delete or resize a Rackspace Cloud Databases instance -description: - - creates / deletes or resizes a Rackspace Cloud Databases instance - and optionally waits for it to be 'running'. The name option needs to be - unique since it's used to identify the instance. -version_added: "1.8" -options: - name: - description: - - Name of the databases server instance - default: null - flavor: - description: - - flavor to use for the instance, 1 to 6 (i.e. 512MB to 16GB) - default: 1 - volume: - description: - - Volume size of the database, 1-150GB - default: 2 - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - wait: - description: - - wait for the instance to be in state 'running' before returning - default: "no" - choices: [ "yes", "no" ] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 -author: Simon JAILLET -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: Build a Cloud Databases instance - gather_facts: False - tasks: - - name: Server build request - local_action: - module: rax_cdb - credentials: ~/.raxpub - region: IAD - name: db-server1 - flavor: 1 - volume: 2 - wait: yes - state: present - register: rax_db_server -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def find_instance(name): - - cdb = pyrax.cloud_databases - instances = cdb.list() - if instances: - for instance in instances: - if instance.name == name: - return instance - return False - - -def save_instance(module, name, flavor, volume, wait, wait_timeout): - - for arg, value in dict(name=name, flavor=flavor, - volume=volume).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb"' - ' module' % arg) - - if not (volume >= 1 and volume <= 150): - module.fail_json(msg='volume must be between 1 and 150') - - cdb = pyrax.cloud_databases - - flavors = [] - for item in cdb.list_flavors(): - flavors.append(item.id) - - if not (flavor in flavors): - module.fail_json(msg='unknown flavor reference "%s"' % str(flavor)) - - changed = False - - instance = find_instance(name) - - if not instance: - action = 'create' - try: - instance = cdb.create(name=name, flavor=flavor, volume=volume) - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - else: - action = None - - if instance.volume.size != volume: - action = 'resize' - if instance.volume.size > volume: - module.fail_json(changed=False, action=action, - msg='The new volume size must be larger than ' - 'the current volume size', - cdb=rax_to_dict(instance)) - instance.resize_volume(volume) - changed = True - - if int(instance.flavor.id) != flavor: - action = 'resize' - pyrax.utils.wait_until(instance, 'status', 'ACTIVE', - attempts=wait_timeout) - instance.resize(flavor) - changed = True - - if
wait: - pyrax.utils.wait_until(instance, 'status', 'ACTIVE', - attempts=wait_timeout) - - if wait and instance.status != 'ACTIVE': - module.fail_json(changed=changed, action=action, - cdb=rax_to_dict(instance), - msg='Timeout waiting for "%s" databases instance to ' - 'be created' % name) - - module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance)) - - -def delete_instance(module, name, wait, wait_timeout): - - if not name: - module.fail_json(msg='name is required for the "rax_cdb" module') - - changed = False - - instance = find_instance(name) - if not instance: - module.exit_json(changed=False, action='delete') - - try: - instance.delete() - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - if wait: - pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN', - attempts=wait_timeout) - - if wait and instance.status != 'SHUTDOWN': - module.fail_json(changed=changed, action='delete', - cdb=rax_to_dict(instance), - msg='Timeout waiting for "%s" databases instance to ' - 'be deleted' % name) - - module.exit_json(changed=changed, action='delete', - cdb=rax_to_dict(instance)) - - -def rax_cdb(module, state, name, flavor, volume, wait, wait_timeout): - - # act on the state - if state == 'present': - save_instance(module, name, flavor, volume, wait, wait_timeout) - elif state == 'absent': - delete_instance(module, name, wait, wait_timeout) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(type='str', required=True), - flavor=dict(type='int', default=1), - volume=dict(type='int', default=2), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - flavor = module.params.get('flavor') - volume = module.params.get('volume') - state = module.params.get('state') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - rax_cdb(module, state, name, flavor, volume, wait, wait_timeout) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -# invoke the module -main() diff --git a/library/cloud/rax_cdb_database b/library/cloud/rax_cdb_database deleted file mode 100644 index 421b6dcb094..00000000000 --- a/library/cloud/rax_cdb_database +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/python -tt -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
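The create and resize branches of the rax_cdb module above map onto these pyrax calls (a sketch with placeholder values; note that the module refuses to shrink a volume, since the underlying API only grows storage):

    import pyrax  # assumes credentials configured as in the earlier sketch

    cdb = pyrax.cloud_databases
    instance = cdb.create(name='db-server1', flavor=1, volume=2)
    pyrax.utils.wait_until(instance, 'status', 'ACTIVE', attempts=60)

    # Resizing is two separate operations in the API, as in the module:
    instance.resize_volume(4)  # storage size can only increase
    instance.resize(2)         # change the flavor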
- -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' -module: rax_cdb_database -short_description: 'create / delete a database in the Cloud Databases' -description: - - create / delete a database in the Cloud Databases. -version_added: "1.8" -options: - cdb_id: - description: - - The databases server UUID - default: null - name: - description: - - Name to give to the database - default: null - character_set: - description: - - Set of symbols and encodings - default: 'utf8' - collate: - description: - - Set of rules for comparing characters in a character set - default: 'utf8_general_ci' - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -author: Simon JAILLET -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: Build a database in Cloud Databases - tasks: - - name: Database build request - local_action: - module: rax_cdb_database - credentials: ~/.raxpub - region: IAD - cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 - name: db1 - state: present - register: rax_db_database -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def find_database(instance, name): - try: - database = instance.get_database(name) - except Exception: - return False - - return database - - -def save_database(module, cdb_id, name, character_set, collate): - - for arg, value in dict(cdb_id=cdb_id, name=name).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_database" ' - 'module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - changed = False - - database = find_database(instance, name) - - if not database: - try: - database = instance.create_database(name=name, - character_set=character_set, - collate=collate) - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='create', - database=rax_to_dict(database)) - - -def delete_database(module, cdb_id, name): - - for arg, value in dict(cdb_id=cdb_id, name=name).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_database" ' - 'module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - changed = False - - database = find_database(instance, name) - - if database: - try: - database.delete() - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='delete') - - -def rax_cdb_database(module, state, cdb_id, name, character_set, collate): - - # act on the state - if state == 'present': - save_database(module, cdb_id, name, character_set, collate) - elif state == 'absent': - delete_database(module, cdb_id, name) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - cdb_id=dict(type='str', required=True), - name=dict(type='str', required=True), - character_set=dict(type='str', default='utf8'), - collate=dict(type='str', default='utf8_general_ci'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') 
- - cdb_id = module.params.get('cdb_id') - name = module.params.get('name') - character_set = module.params.get('character_set') - collate = module.params.get('collate') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - rax_cdb_database(module, state, cdb_id, name, character_set, collate) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -# invoke the module -main() diff --git a/library/cloud/rax_cdb_user b/library/cloud/rax_cdb_user deleted file mode 100644 index a0958084c92..00000000000 --- a/library/cloud/rax_cdb_user +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/python -tt -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_cdb_user -short_description: create / delete a Rackspace Cloud Database -description: - - create / delete a database in the Cloud Databases. -version_added: "1.8" -options: - cdb_id: - description: - - The databases server UUID - default: null - db_username: - description: - - Name of the database user - default: null - db_password: - description: - - Database user password - default: null - databases: - description: - - Name of the databases that the user can access - default: [] - host: - description: - - Specifies the host from which a user is allowed to connect to - the database. 
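Creating and deleting a database inside an existing instance, as the rax_cdb_database module above does, is a thin wrapper over two instance methods (a sketch; the UUID is the placeholder from the module's own example):

    import pyrax  # assumes credentials configured as in the earlier sketch

    cdb = pyrax.cloud_databases
    instance = cdb.get('323e7ce0-9cb0-11e3-a5e2-0800200c9a66')

    database = instance.create_database(name='db1', character_set='utf8',
                                        collate='utf8_general_ci')
    # Deletion is a lookup followed by delete(), as in delete_database() above
    instance.get_database('db1').delete()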
Possible values are a string containing an IPv4 address - or "%" to allow connecting from any host - default: '%' - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -author: Simon JAILLET -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: Build a user in Cloud Databases - tasks: - - name: User build request - local_action: - module: rax_cdb_user - credentials: ~/.raxpub - region: IAD - cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 - db_username: user1 - db_password: user1 - databases: ['db1'] - state: present - register: rax_db_user -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def find_user(instance, name): - try: - user = instance.get_user(name) - except Exception: - return False - - return user - - -def save_user(module, cdb_id, name, password, databases, host): - - for arg, value in dict(cdb_id=cdb_id, name=name).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_user" ' - 'module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - changed = False - - user = find_user(instance, name) - - if not user: - action = 'create' - try: - user = instance.create_user(name=name, - password=password, - database_names=databases, - host=host) - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - else: - action = 'update' - - if user.host != host: - changed = True - - user.update(password=password, host=host) - - former_dbs = set([item.name for item in user.list_user_access()]) - databases = set(databases) - - if databases != former_dbs: - try: - revoke_dbs = [db for db in former_dbs if db not in databases] - user.revoke_user_access(db_names=revoke_dbs) - - new_dbs = [db for db in databases if db not in former_dbs] - user.grant_user_access(db_names=new_dbs) - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action=action, user=rax_to_dict(user)) - - -def delete_user(module, cdb_id, name): - - for arg, value in dict(cdb_id=cdb_id, name=name).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_user"' - ' module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - changed = False - - user = find_user(instance, name) - - if user: - try: - user.delete() - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='delete') - - -def rax_cdb_user(module, state, cdb_id, name, password, databases, host): - - # act on the state - if state == 'present': - save_user(module, cdb_id, name, password, databases, host) - elif state == 'absent': - delete_user(module, cdb_id, name) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - cdb_id=dict(type='str', required=True), - db_username=dict(type='str', required=True), - db_password=dict(type='str', required=True, no_log=True), - databases=dict(type='list', default=[]), - host=dict(type='str', default='%'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - 
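The access-reconciliation step in save_user above deserves a closer look: it diffs the user's current database grants against the requested list and applies only the delta, which is what makes repeated runs idempotent. A sketch of that pattern with placeholder names:

    # Assumes 'instance' is a CloudDatabaseInstance obtained as in the
    # earlier sketches; names and passwords are placeholders.
    user = instance.create_user(name='user1', password='s3cret',
                                database_names=['db1'], host='%')

    current = set(item.name for item in user.list_user_access())
    wanted = set(['db1', 'db2'])

    if wanted != current:
        user.revoke_user_access(db_names=list(current - wanted))
        user.grant_user_access(db_names=list(wanted - current))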
cdb_id = module.params.get('cdb_id') - name = module.params.get('db_username') - password = module.params.get('db_password') - databases = module.params.get('databases') - host = unicode(module.params.get('host')) - state = module.params.get('state') - - setup_rax_module(module, pyrax) - rax_cdb_user(module, state, cdb_id, name, password, databases, host) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -# invoke the module -main() diff --git a/library/cloud/rax_clb b/library/cloud/rax_clb deleted file mode 100644 index 7a2699709da..00000000000 --- a/library/cloud/rax_clb +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_clb -short_description: create / delete a load balancer in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud load balancer. -version_added: "1.4" -options: - algorithm: - description: - - algorithm for the balancer being created - choices: - - RANDOM - - LEAST_CONNECTIONS - - ROUND_ROBIN - - WEIGHTED_LEAST_CONNECTIONS - - WEIGHTED_ROUND_ROBIN - default: LEAST_CONNECTIONS - meta: - description: - - A hash of metadata to associate with the instance - default: null - name: - description: - - Name to give the load balancer - default: null - port: - description: - - Port for the balancer being created - default: 80 - protocol: - description: - - Protocol for the balancer being created - choices: - - DNS_TCP - - DNS_UDP - - FTP - - HTTP - - HTTPS - - IMAPS - - IMAPv4 - - LDAP - - LDAPS - - MYSQL - - POP3 - - POP3S - - SMTP - - TCP - - TCP_CLIENT_FIRST - - UDP - - UDP_STREAM - - SFTP - default: HTTP - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - timeout: - description: - - timeout for communication between the balancer and the node - default: 30 - type: - description: - - type of interface for the balancer being created - choices: - - PUBLIC - - SERVICENET - default: PUBLIC - vip_id: - description: - - Virtual IP ID to use when creating the load balancer for purposes of - sharing an IP with another load balancer of another protocol - version_added: 1.5 - wait: - description: - - wait for the balancer to be in state 'running' before returning - default: "no" - choices: - - "yes" - - "no" - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 -author: Christopher H. 
Laco, Matt Martz -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: Build a Load Balancer - gather_facts: False - hosts: local - connection: local - tasks: - - name: Load Balancer create request - local_action: - module: rax_clb - credentials: ~/.raxpub - name: my-lb - port: 8080 - protocol: HTTP - type: SERVICENET - timeout: 30 - region: DFW - wait: yes - state: present - meta: - app: my-cool-app - register: my_lb -''' - - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, - vip_type, timeout, wait, wait_timeout, vip_id): - for arg in (state, name, port, protocol, vip_type): - if not arg: - module.fail_json(msg='%s is required for rax_clb' % arg) - - if int(timeout) < 30: - module.fail_json(msg='"timeout" must be greater than or equal to 30') - - changed = False - balancers = [] - - clb = pyrax.cloud_loadbalancers - if not clb: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - for balancer in clb.list(): - if name != balancer.name and name != balancer.id: - continue - - balancers.append(balancer) - - if len(balancers) > 1: - module.fail_json(msg='Multiple Load Balancers were matched by name, ' - 'try using the Load Balancer ID instead') - - if state == 'present': - if isinstance(meta, dict): - metadata = [dict(key=k, value=v) for k, v in meta.items()] - - if not balancers: - try: - virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)] - balancer = clb.create(name, metadata=metadata, port=port, - algorithm=algorithm, protocol=protocol, - timeout=timeout, virtual_ips=virtual_ips) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - balancer = balancers[0] - setattr(balancer, 'metadata', - [dict(key=k, value=v) for k, v in - balancer.get_metadata().items()]) - atts = { - 'name': name, - 'algorithm': algorithm, - 'port': port, - 'protocol': protocol, - 'timeout': timeout - } - for att, value in atts.iteritems(): - current = getattr(balancer, att) - if current != value: - changed = True - - if changed: - balancer.update(**atts) - - if balancer.metadata != metadata: - balancer.set_metadata(meta) - changed = True - - virtual_ips = [clb.VirtualIP(type=vip_type)] - current_vip_types = set([v.type for v in balancer.virtual_ips]) - vip_types = set([v.type for v in virtual_ips]) - if current_vip_types != vip_types: - module.fail_json(msg='Load balancer Virtual IP type cannot ' - 'be changed') - - if wait: - attempts = wait_timeout / 5 - pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - - balancer.get() - instance = rax_to_dict(balancer, 'clb') - - result = dict(changed=changed, balancer=instance) - - if balancer.status == 'ERROR': - result['msg'] = '%s failed to build' % balancer.id - elif wait and balancer.status not in ('ACTIVE', 'ERROR'): - result['msg'] = 'Timeout waiting on %s' % balancer.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - if balancers: - balancer = balancers[0] - try: - balancer.delete() - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - instance = rax_to_dict(balancer, 'clb') - - if wait: - attempts = wait_timeout / 5 - pyrax.utils.wait_until(balancer, 'status', ('DELETED'), - interval=5, attempts=attempts) - else: - instance = {} - - module.exit_json(changed=changed, 
balancer=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - algorithm=dict(choices=CLB_ALGORITHMS, - default='LEAST_CONNECTIONS'), - meta=dict(type='dict', default={}), - name=dict(), - port=dict(type='int', default=80), - protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'), - state=dict(default='present', choices=['present', 'absent']), - timeout=dict(type='int', default=30), - type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'), - vip_id=dict(), - wait=dict(type='bool'), - wait_timeout=dict(default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - algorithm = module.params.get('algorithm') - meta = module.params.get('meta') - name = module.params.get('name') - port = module.params.get('port') - protocol = module.params.get('protocol') - state = module.params.get('state') - timeout = int(module.params.get('timeout')) - vip_id = module.params.get('vip_id') - vip_type = module.params.get('type') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - setup_rax_module(module, pyrax) - - cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, - vip_type, timeout, wait, wait_timeout, vip_id) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_clb_nodes b/library/cloud/rax_clb_nodes deleted file mode 100644 index 24325b44597..00000000000 --- a/library/cloud/rax_clb_nodes +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
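Creating a load balancer with an explicit virtual IP, as the rax_clb module above does internally, looks roughly like this (placeholder values; note the API expects metadata as a list of key/value dicts, which is why the module converts its meta hash first):

    import pyrax  # assumes credentials configured as in the earlier sketch

    clb = pyrax.cloud_loadbalancers
    vip = clb.VirtualIP(type='PUBLIC')
    balancer = clb.create('my-lb', port=8080, protocol='HTTP',
                          algorithm='LEAST_CONNECTIONS', timeout=30,
                          metadata=[{'key': 'app', 'value': 'my-cool-app'}],
                          virtual_ips=[vip])
    pyrax.utils.wait_for_build(balancer, interval=5, attempts=60)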
- -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_clb_nodes -short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer -description: - - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer -version_added: "1.4" -options: - address: - required: false - description: - - IP address or domain name of the node - condition: - required: false - choices: - - enabled - - disabled - - draining - description: - - Condition for the node, which determines its role within the load - balancer - load_balancer_id: - required: true - type: integer - description: - - Load balancer id - node_id: - required: false - type: integer - description: - - Node id - port: - required: false - type: integer - description: - - Port number of the load balanced service on the node - state: - required: false - default: "present" - choices: - - present - - absent - description: - - Indicate desired state of the node - type: - required: false - choices: - - primary - - secondary - description: - - Type of node - wait: - required: false - default: "no" - choices: - - "yes" - - "no" - description: - - Wait for the load balancer to become active before returning - wait_timeout: - required: false - type: integer - default: 30 - description: - - How long to wait before giving up and returning an error - weight: - required: false - description: - - Weight of node -author: Lukasz Kawczynski -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -# Add a new node to the load balancer -- local_action: - module: rax_clb_nodes - load_balancer_id: 71 - address: 10.2.2.3 - port: 80 - condition: enabled - type: primary - wait: yes - credentials: /path/to/credentials - -# Drain connections from a node -- local_action: - module: rax_clb_nodes - load_balancer_id: 71 - node_id: 410 - condition: draining - wait: yes - credentials: /path/to/credentials - -# Remove a node from the load balancer -- local_action: - module: rax_clb_nodes - load_balancer_id: 71 - node_id: 410 - state: absent - wait: yes - credentials: /path/to/credentials -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def _activate_virtualenv(path): - path = os.path.expanduser(path) - activate_this = os.path.join(path, 'bin', 'activate_this.py') - execfile(activate_this, dict(__file__=activate_this)) - - -def _get_node(lb, node_id=None, address=None, port=None): - """Return a matching node""" - for node in getattr(lb, 'nodes', []): - match_list = [] - if node_id is not None: - match_list.append(getattr(node, 'id', None) == node_id) - if address is not None: - match_list.append(getattr(node, 'address', None) == address) - if port is not None: - match_list.append(getattr(node, 'port', None) == port) - - if match_list and all(match_list): - return node - - return None - - -def _is_primary(node): - """Return True if node is primary and enabled""" - return (node.type.lower() == 'primary' and - node.condition.lower() == 'enabled') - - -def _get_primary_nodes(lb): - """Return a list of primary and enabled nodes""" - nodes = [] - for node in lb.nodes: - if _is_primary(node): - nodes.append(node) - return nodes - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - condition=dict(choices=['enabled', 'disabled', 'draining']), - load_balancer_id=dict(required=True, type='int'), - node_id=dict(type='int'), - 
port=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - type=dict(choices=['primary', 'secondary']), - virtualenv=dict(), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=30, type='int'), - weight=dict(type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params['address'] - condition = (module.params['condition'] and - module.params['condition'].upper()) - load_balancer_id = module.params['load_balancer_id'] - node_id = module.params['node_id'] - port = module.params['port'] - state = module.params['state'] - typ = module.params['type'] and module.params['type'].upper() - virtualenv = module.params['virtualenv'] - wait = module.params['wait'] - wait_timeout = module.params['wait_timeout'] or 1 - weight = module.params['weight'] - - if virtualenv: - try: - _activate_virtualenv(virtualenv) - except IOError, e: - module.fail_json(msg='Failed to activate virtualenv %s (%s)' % ( - virtualenv, e)) - - setup_rax_module(module, pyrax) - - if not pyrax.cloud_loadbalancers: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - try: - lb = pyrax.cloud_loadbalancers.get(load_balancer_id) - except pyrax.exc.PyraxException, e: - module.fail_json(msg='%s' % e.message) - - node = _get_node(lb, node_id, address, port) - - result = rax_clb_node_to_dict(node) - - if state == 'absent': - if not node: # Removing a non-existent node - module.exit_json(changed=False, state=state) - - # The API detects this as well but currently pyrax does not return a - # meaningful error message - if _is_primary(node) and len(_get_primary_nodes(lb)) == 1: - module.fail_json( - msg='At least one primary node has to be enabled') - - try: - lb.delete_node(node) - result = {} - except pyrax.exc.NotFound: - module.exit_json(changed=False, state=state) - except pyrax.exc.PyraxException, e: - module.fail_json(msg='%s' % e.message) - else: # present - if not node: - if node_id: # Updating a non-existent node - msg = 'Node %d not found' % node_id - if lb.nodes: - msg += (' (available nodes: %s)' % - ', '.join([str(x.id) for x in lb.nodes])) - module.fail_json(msg=msg) - else: # Creating a new node - try: - node = pyrax.cloudloadbalancers.Node( - address=address, port=port, condition=condition, - weight=weight, type=typ) - resp, body = lb.add_nodes([node]) - result.update(body['nodes'][0]) - except pyrax.exc.PyraxException, e: - module.fail_json(msg='%s' % e.message) - else: # Updating an existing node - mutable = { - 'condition': condition, - 'type': typ, - 'weight': weight, - } - - for name, value in mutable.items(): - if value is None or value == getattr(node, name): - mutable.pop(name) - - if not mutable: - module.exit_json(changed=False, state=state, node=result) - - try: - # The diff has to be set explicitly to update node's weight and - # type; this should probably be fixed in pyrax - lb.update_node(node, diff=mutable) - result.update(mutable) - except pyrax.exc.PyraxException, e: - module.fail_json(msg='%s' % e.message) - - if wait: - pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1, - attempts=wait_timeout) - if lb.status != 'ACTIVE': - module.fail_json( - msg='Load balancer not active after %ds (current status: %s)' % - (wait_timeout, lb.status.lower())) - - kwargs = {'node': result} if 
result else {} - module.exit_json(changed=True, state=state, **kwargs) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_dns b/library/cloud/rax_dns deleted file mode 100644 index dacc4c672fe..00000000000 --- a/library/cloud/rax_dns +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_dns -short_description: Manage domains on Rackspace Cloud DNS -description: - - Manage domains on Rackspace Cloud DNS -version_added: 1.5 -options: - comment: - description: - - Brief description of the domain. Maximum length of 160 characters - email: - description: - - Email address of the domain administrator - name: - description: - - Domain name to create - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - ttl: - description: - - Time to live of domain in seconds - default: 3600 -notes: - - "It is recommended that plays utilizing this module be run with - C(serial: 1) to avoid exceeding the API request limit imposed by - the Rackspace CloudDNS API" -author: Matt Martz -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: Create domain - hosts: all - gather_facts: False - tasks: - - name: Domain create request - local_action: - module: rax_dns - credentials: ~/.raxpub - name: example.org - email: admin@example.org - register: rax_dns -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def rax_dns(module, comment, email, name, state, ttl): - changed = False - - dns = pyrax.cloud_dns - if not dns: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not email: - module.fail_json(msg='An "email" attribute is required for ' - 'creating a domain') - - try: - domain = dns.find(name=name) - except pyrax.exceptions.NoUniqueMatch, e: - module.fail_json(msg='%s' % e.message) - except pyrax.exceptions.NotFound: - try: - domain = dns.create(name=name, emailAddress=email, ttl=ttl, - comment=comment) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - update = {} - if comment != getattr(domain, 'comment', None): - update['comment'] = comment - if ttl != getattr(domain, 'ttl', None): - update['ttl'] = ttl - if email != getattr(domain, 'emailAddress', None): - update['emailAddress'] = email - - if update: - try: - domain.update(**update) - changed = True - domain.get() - except Exception, e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - domain = dns.find(name=name) - except pyrax.exceptions.NotFound: - domain = {} - pass - except Exception, e: - module.fail_json(msg='%s' % e.message) - - if domain: - try: - domain.delete() - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, domain=rax_to_dict(domain)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - comment=dict(), - email=dict(), - name=dict(), - state=dict(default='present', choices=['present', 'absent']), - ttl=dict(type='int', default=3600), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - comment = module.params.get('comment') - email = module.params.get('email') - name = module.params.get('name') - state = module.params.get('state') - ttl = module.params.get('ttl') - - setup_rax_module(module, pyrax, False) - - rax_dns(module, comment, email, name, state, ttl) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_dns_record b/library/cloud/rax_dns_record deleted file mode 100644 index a28f5b9a9b3..00000000000 --- a/library/cloud/rax_dns_record +++ /dev/null @@ -1,335 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_dns_record -short_description: Manage DNS records on Rackspace Cloud DNS -description: - - Manage DNS records on Rackspace Cloud DNS -version_added: 1.5 -options: - comment: - description: - - Brief description of the record. 
Maximum length of 160 characters - data: - description: - - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for - SRV/TXT - required: True - domain: - description: - - Domain name to create the record in. This is an invalid option when - type=PTR - loadbalancer: - description: - - Load Balancer ID to create a PTR record for. Only used with type=PTR - version_added: 1.7 - name: - description: - - FQDN record name to create - required: True - priority: - description: - - Required for MX and SRV records, but forbidden for other record types. - If specified, must be an integer from 0 to 65535. - server: - description: - - Server ID to create a PTR record for. Only used with type=PTR - version_added: 1.7 - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - ttl: - description: - - Time to live of record in seconds - default: 3600 - type: - description: - - DNS record type - choices: - - A - - AAAA - - CNAME - - MX - - NS - - SRV - - TXT - - PTR - required: true -notes: - - "It is recommended that plays utilizing this module be run with - C(serial: 1) to avoid exceeding the API request limit imposed by - the Rackspace CloudDNS API" - - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be - supplied - - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record. - - C(PTR) record support was added in version 1.7 -author: Matt Martz -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: Create DNS Records - hosts: all - gather_facts: False - tasks: - - name: Create A record - local_action: - module: rax_dns_record - credentials: ~/.raxpub - domain: example.org - name: www.example.org - data: "{{ rax_accessipv4 }}" - type: A - register: a_record - - - name: Create PTR record - local_action: - module: rax_dns_record - credentials: ~/.raxpub - server: "{{ rax_id }}" - name: "{{ inventory_hostname }}" - region: DFW - register: ptr_record -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None, - name=None, server=None, state='present', ttl=7200): - changed = False - results = [] - - dns = pyrax.cloud_dns - - if not dns: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if loadbalancer: - item = rax_find_loadbalancer(module, pyrax, loadbalancer) - elif server: - item = rax_find_server(module, pyrax, server) - - if state == 'present': - current = dns.list_ptr_records(item) - for record in current: - if record.data == data: - if record.ttl != ttl or record.name != name: - try: - dns.update_ptr_record(item, record, name, data, ttl) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - record.ttl = ttl - record.name = name - results.append(rax_to_dict(record)) - break - else: - results.append(rax_to_dict(record)) - break - - if not results: - record = dict(name=name, type='PTR', data=data, ttl=ttl, - comment=comment) - try: - results = dns.add_ptr_records(item, [record]) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, records=results) - - elif state == 'absent': - current = dns.list_ptr_records(item) - for record in current: - if record.data == data: - results.append(rax_to_dict(record)) - break - - if results: - try: - dns.delete_ptr_records(item, data) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, records=results) - - -def rax_dns_record(module, comment=None, data=None, domain=None, name=None, - priority=None, record_type='A', state='present', ttl=7200): - """Function for manipulating record types other than PTR""" - - changed = False - - dns = pyrax.cloud_dns - if not dns: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not priority and record_type in ['MX', 'SRV']: - module.fail_json(msg='A "priority" attribute is required for ' - 'creating a MX or SRV record') - - try: - domain = dns.find(name=domain) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - try: - record = domain.find_record(record_type, name=name) - except pyrax.exceptions.DomainRecordNotUnique, e: - module.fail_json(msg='%s' % e.message) - except pyrax.exceptions.DomainRecordNotFound, e: - try: - record_data = { - 'type': record_type, - 'name': name, - 'data': data, - 'ttl': ttl - } - if comment: - record_data.update(dict(comment=comment)) - if priority and record_type.upper() in ['MX', 'SRV']: - record_data.update(dict(priority=priority)) - - record = domain.add_records([record_data])[0] - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - update = {} - if comment != getattr(record, 'comment', None): - update['comment'] = comment - if ttl != getattr(record, 'ttl', None): - update['ttl'] = ttl - if priority != getattr(record, 'priority', None): - update['priority'] = priority - if data != getattr(record, 'data', None): - update['data'] = data - - if update: - try: - record.update(**update) - changed = True - record.get() - except Exception, e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - domain = dns.find(name=domain) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - try: - record = domain.find_record(record_type, name=name, data=data) - except pyrax.exceptions.DomainRecordNotFound, e: - record = {} - pass - except pyrax.exceptions.DomainRecordNotUnique, e: - module.fail_json(msg='%s' % e.message) - - if record: - try: - record.delete() - changed = True - except Exception, e: - 
module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, record=rax_to_dict(record)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - comment=dict(), - data=dict(required=True), - domain=dict(), - loadbalancer=dict(), - name=dict(required=True), - priority=dict(type='int'), - server=dict(), - state=dict(default='present', choices=['present', 'absent']), - ttl=dict(type='int', default=3600), - type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', - 'SRV', 'TXT', 'PTR']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[ - ['server', 'loadbalancer', 'domain'], - ], - required_one_of=[ - ['server', 'loadbalancer', 'domain'], - ], - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - comment = module.params.get('comment') - data = module.params.get('data') - domain = module.params.get('domain') - loadbalancer = module.params.get('loadbalancer') - name = module.params.get('name') - priority = module.params.get('priority') - server = module.params.get('server') - state = module.params.get('state') - ttl = module.params.get('ttl') - record_type = module.params.get('type') - - setup_rax_module(module, pyrax, False) - - if record_type.upper() == 'PTR': - if not server and not loadbalancer: - module.fail_json(msg='one of the following is required: ' - 'server,loadbalancer') - rax_dns_record_ptr(module, data=data, comment=comment, - loadbalancer=loadbalancer, name=name, server=server, - state=state, ttl=ttl) - else: - rax_dns_record(module, comment=comment, data=data, domain=domain, - name=name, priority=priority, record_type=record_type, - state=state, ttl=ttl) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_facts b/library/cloud/rax_facts deleted file mode 100644 index 68ef446f760..00000000000 --- a/library/cloud/rax_facts +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_facts -short_description: Gather facts for Rackspace Cloud Servers -description: - - Gather facts for Rackspace Cloud Servers. 
-version_added: "1.4" -options: - address: - description: - - Server IP address to retrieve facts for, will match any IP assigned to - the server - id: - description: - - Server ID to retrieve facts for - name: - description: - - Server name to retrieve facts for - default: null -author: Matt Martz -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Gather info about servers - hosts: all - gather_facts: False - tasks: - - name: Get facts about servers - local_action: - module: rax_facts - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW - - name: Map some facts - set_fact: - ansible_ssh_host: "{{ rax_accessipv4 }}" -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def rax_facts(module, address, name, server_id): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - ansible_facts = {} - - search_opts = {} - if name: - search_opts = dict(name='^%s$' % name) - try: - servers = cs.servers.list(search_opts=search_opts) - except Exception, e: - module.fail_json(msg='%s' % e.message) - elif address: - servers = [] - try: - for server in cs.servers.list(): - for addresses in server.networks.values(): - if address in addresses: - servers.append(server) - break - except Exception, e: - module.fail_json(msg='%s' % e.message) - elif server_id: - servers = [] - try: - servers.append(cs.servers.get(server_id)) - except Exception, e: - pass - - if len(servers) > 1: - module.fail_json(msg='Multiple servers found matching provided ' - 'search parameters') - elif len(servers) == 1: - ansible_facts = rax_to_dict(servers[0], 'server') - - module.exit_json(changed=changed, ansible_facts=ansible_facts) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - id=dict(), - name=dict(), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[['address', 'id', 'name']], - required_one_of=[['address', 'id', 'name']], - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params.get('address') - server_id = module.params.get('id') - name = module.params.get('name') - - setup_rax_module(module, pyrax) - - rax_facts(module, address, name, server_id) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_files b/library/cloud/rax_files deleted file mode 100644 index 3c54b0a9e2f..00000000000 --- a/library/cloud/rax_files +++ /dev/null @@ -1,379 +0,0 @@ -#!/usr/bin/python - -# (c) 2013, Paul Durivage -# -# This file is part of Ansible. -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
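The rax_files module below reads and writes container metadata through pyrax, which stores each key under an 'x-container-meta-' prefix; the module's _fetch_meta helper strips that prefix before reporting results. A minimal sketch of the same round trip, assuming an already-authenticated client is available as pyrax.cloudfiles (the helper and container names are illustrative):

import pyrax

META_PREFIX = 'x-container-meta-'

def fetch_container_meta(container_name):
    """Return container metadata with the storage prefix stripped, as _fetch_meta does."""
    cf = pyrax.cloudfiles
    container = cf.get_container(container_name)
    meta = {}
    for key, value in container.get_metadata().items():
        # Keys come back as 'x-container-meta-<name>'; keep only the <name> part
        meta[key.split(META_PREFIX)[-1]] = value
    return meta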
- -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_files -short_description: Manipulate Rackspace Cloud Files Containers -description: - - Manipulate Rackspace Cloud Files Containers -version_added: "1.5" -options: - clear_meta: - description: - - Optionally clear existing metadata when applying metadata to existing containers. - Selecting this option is only appropriate when setting type=meta - choices: - - "yes" - - "no" - default: "no" - container: - description: - - The container to use for container or metadata operations. - required: true - meta: - description: - - A hash of items to set as metadata values on a container - private: - description: - - Used to set a container as private, removing it from the CDN. B(Warning!) - Private containers, if previously made public, can have live objects - available until the TTL on cached objects expires - public: - description: - - Used to set a container as public, available via the Cloud Files CDN - region: - description: - - Region to create an instance in - default: DFW - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - ttl: - description: - - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes. - Setting a TTL is only appropriate for containers that are public - type: - description: - - Type of object to do work on, i.e. metadata object or a container object - choices: - - file - - meta - default: file - web_error: - description: - - Sets an object to be presented as the HTTP error page when accessed by the CDN URL - web_index: - description: - - Sets an object to be presented as the HTTP index page when accessed by the CDN URL -author: Paul Durivage -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: "Test Cloud Files Containers" - hosts: local - gather_facts: no - tasks: - - name: "List all containers" - rax_files: state=list - - - name: "Create container called 'mycontainer'" - rax_files: container=mycontainer - - - name: "Create container 'mycontainer2' with metadata" - rax_files: - container: mycontainer2 - meta: - key: value - file_for: someuser@example.com - - - name: "Set a container's web index page" - rax_files: container=mycontainer web_index=index.html - - - name: "Set a container's web error page" - rax_files: container=mycontainer web_error=error.html - - - name: "Make container public" - rax_files: container=mycontainer public=yes - - - name: "Make container public with a 24 hour TTL" - rax_files: container=mycontainer public=yes ttl=86400 - - - name: "Make container private" - rax_files: container=mycontainer private=yes - -- name: "Test Cloud Files Containers Metadata Storage" - hosts: local - gather_facts: no - tasks: - - name: "Get mycontainer2 metadata" - rax_files: - container: mycontainer2 - type: meta - - - name: "Set mycontainer2 metadata" - rax_files: - container: mycontainer2 - type: meta - meta: - uploaded_by: someuser@example.com - - - name: "Remove mycontainer2 metadata" - rax_files: - container: "mycontainer2" - type: meta - state: absent - meta: - key: "" - file_for: "" -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError, e: - HAS_PYRAX = False - -EXIT_DICT = dict(success=True) -META_PREFIX = 'x-container-meta-' - - -def _get_container(module, cf, container): - try: - return cf.get_container(container) - except pyrax.exc.NoSuchContainer, e: - 
module.fail_json(msg=e.message) - - -def _fetch_meta(module, container): - EXIT_DICT['meta'] = dict() - try: - for k, v in container.get_metadata().items(): - split_key = k.split(META_PREFIX)[-1] - EXIT_DICT['meta'][split_key] = v - except Exception, e: - module.fail_json(msg=e.message) - - -def meta(cf, module, container_, state, meta_, clear_meta): - c = _get_container(module, cf, container_) - - if meta_ and state == 'present': - try: - meta_set = c.set_metadata(meta_, clear=clear_meta) - except Exception, e: - module.fail_json(msg=e.message) - elif meta_ and state == 'absent': - remove_results = [] - for k, v in meta_.items(): - c.remove_metadata_key(k) - remove_results.append(k) - EXIT_DICT['deleted_meta_keys'] = remove_results - elif state == 'absent': - remove_results = [] - for k, v in c.get_metadata().items(): - c.remove_metadata_key(k) - remove_results.append(k) - EXIT_DICT['deleted_meta_keys'] = remove_results - - _fetch_meta(module, c) - _locals = locals().keys() - - EXIT_DICT['container'] = c.name - if 'meta_set' in _locals or 'remove_results' in _locals: - EXIT_DICT['changed'] = True - - module.exit_json(**EXIT_DICT) - - -def container(cf, module, container_, state, meta_, clear_meta, ttl, public, - private, web_index, web_error): - if public and private: - module.fail_json(msg='container cannot be simultaneously ' - 'set to public and private') - - if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error): - module.fail_json(msg='state cannot be omitted when setting/removing ' - 'attributes on a container') - - if state == 'list': - # We don't care if attributes are specified, let's list containers - EXIT_DICT['containers'] = cf.list_containers() - module.exit_json(**EXIT_DICT) - - try: - c = cf.get_container(container_) - except pyrax.exc.NoSuchContainer, e: - # Make the container if state=present, otherwise bomb out - if state == 'present': - try: - c = cf.create_container(container_) - except Exception, e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['changed'] = True - EXIT_DICT['created'] = True - else: - module.fail_json(msg=e.message) - else: - # Successfully grabbed a container object - # Delete if state is absent - if state == 'absent': - try: - cont_deleted = c.delete() - except Exception, e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['deleted'] = True - - if meta_: - try: - meta_set = c.set_metadata(meta_, clear=clear_meta) - except Exception, e: - module.fail_json(msg=e.message) - finally: - _fetch_meta(module, c) - - if ttl: - try: - c.cdn_ttl = ttl - except Exception, e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['ttl'] = c.cdn_ttl - - if public: - try: - cont_public = c.make_public() - except Exception, e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['container_urls'] = dict(url=c.cdn_uri, - ssl_url=c.cdn_ssl_uri, - streaming_url=c.cdn_streaming_uri, - ios_uri=c.cdn_ios_uri) - - if private: - try: - cont_private = c.make_private() - except Exception, e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_private'] = True - - if web_index: - try: - cont_web_index = c.set_web_index_page(web_index) - except Exception, e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_index'] = True - finally: - _fetch_meta(module, c) - - if web_error: - try: - cont_err_index = c.set_web_error_page(web_error) - except Exception, e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_error'] = True - finally: - _fetch_meta(module, c) - - EXIT_DICT['container'] = c.name - 
EXIT_DICT['objs_in_container'] = c.object_count - EXIT_DICT['total_bytes'] = c.total_bytes - - _locals = locals().keys() - if ('cont_deleted' in _locals - or 'meta_set' in _locals - or 'cont_public' in _locals - or 'cont_private' in _locals - or 'cont_web_index' in _locals - or 'cont_err_index' in _locals): - EXIT_DICT['changed'] = True - - module.exit_json(**EXIT_DICT) - - -def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, - private, web_index, web_error): - """ Dispatch from here to work with metadata or file objects """ - cf = pyrax.cloudfiles - - if cf is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if typ == "container": - container(cf, module, container_, state, meta_, clear_meta, ttl, - public, private, web_index, web_error) - else: - meta(cf, module, container_, state, meta_, clear_meta) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - container=dict(), - state=dict(choices=['present', 'absent', 'list'], - default='present'), - meta=dict(type='dict', default=dict()), - clear_meta=dict(default=False, type='bool'), - type=dict(choices=['container', 'meta'], default='container'), - ttl=dict(type='int'), - public=dict(default=False, type='bool'), - private=dict(default=False, type='bool'), - web_index=dict(), - web_error=dict() - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - container_ = module.params.get('container') - state = module.params.get('state') - meta_ = module.params.get('meta') - clear_meta = module.params.get('clear_meta') - typ = module.params.get('type') - ttl = module.params.get('ttl') - public = module.params.get('public') - private = module.params.get('private') - web_index = module.params.get('web_index') - web_error = module.params.get('web_error') - - if state in ['present', 'absent'] and not container_: - module.fail_json(msg='please specify a container name') - if clear_meta and not typ == 'meta': - module.fail_json(msg='clear_meta can only be used when setting ' - 'metadata') - - setup_rax_module(module, pyrax) - cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, - private, web_index, web_error) - - -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -main() diff --git a/library/cloud/rax_files_objects b/library/cloud/rax_files_objects deleted file mode 100644 index f2510477674..00000000000 --- a/library/cloud/rax_files_objects +++ /dev/null @@ -1,603 +0,0 @@ -#!/usr/bin/python - -# (c) 2013, Paul Durivage -# -# This file is part of Ansible. -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
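When rax_files_objects below uploads a whole directory, pyrax performs the transfer in the background: cf.upload_folder() returns an upload key and the total byte count, and the module polls cf.get_uploaded() until the transferred bytes reach that total. A minimal sketch of that wait loop, assuming an authenticated client is available as pyrax.cloudfiles; the function name and poll interval are illustrative:

import time

import pyrax

def upload_folder_and_wait(container_name, src_dir, poll_interval=1):
    """Upload a directory to Cloud Files and block until the transfer completes."""
    cf = pyrax.cloudfiles
    upload_key, total_bytes = cf.upload_folder(src_dir, container=container_name)
    # get_uploaded() reports the bytes transferred so far for this upload key
    while cf.get_uploaded(upload_key) < total_bytes:
        time.sleep(poll_interval)
    return total_bytes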
- -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_files_objects -short_description: Upload, download, and delete objects in Rackspace Cloud Files -description: - - Upload, download, and delete objects in Rackspace Cloud Files -version_added: "1.5" -options: - clear_meta: - description: - - Optionally clear existing metadata when applying metadata to existing objects. - Selecting this option is only appropriate when setting type=meta - choices: - - "yes" - - "no" - default: "no" - container: - description: - - The container to use for file object operations. - required: true - default: null - dest: - description: - - The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder". - Used to specify the destination of an operation on a remote object; i.e. a file name, - "file1", or a comma-separated list of remote objects, "file1,file2,file17" - expires: - description: - - Used to set an expiration on a file or folder uploaded to Cloud Files. - Requires an integer, specifying expiration in seconds - default: null - meta: - description: - - A hash of items to set as metadata values on an uploaded file or folder - default: null - method: - description: - - The method of operation to be performed. For example, put to upload files - to Cloud Files, get to download files from Cloud Files or delete to delete - remote objects in Cloud Files - choices: - - get - - put - - delete - default: get - src: - description: - - Source from which to upload files. Used to specify a remote object as a source for - an operation, i.e. a file name, "file1", or a comma-separated list of remote objects, - "file1,file2,file17". src and dest are mutually exclusive on remote-only object operations - default: null - structure: - description: - - Used to specify whether to maintain nested directory structure when downloading objects - from Cloud Files. 
Setting to false downloads the contents of a container to a single, - flat directory - choices: - - yes - - "no" - default: "yes" - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - type: - description: - - Type of object to do work on - - Metadata object or a file object - choices: - - file - - meta - default: file -author: Paul Durivage -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: "Test Cloud Files Objects" - hosts: local - gather_facts: False - tasks: - - name: "Get objects from test container" - rax_files_objects: container=testcont dest=~/Downloads/testcont - - - name: "Get single object from test container" - rax_files_objects: container=testcont src=file1 dest=~/Downloads/testcont - - - name: "Get several objects from test container" - rax_files_objects: container=testcont src=file1,file2,file3 dest=~/Downloads/testcont - - - name: "Delete one object in test container" - rax_files_objects: container=testcont method=delete dest=file1 - - - name: "Delete several objects in test container" - rax_files_objects: container=testcont method=delete dest=file2,file3,file4 - - - name: "Delete all objects in test container" - rax_files_objects: container=testcont method=delete - - - name: "Upload all files to test container" - rax_files_objects: container=testcont method=put src=~/Downloads/onehundred - - - name: "Upload one file to test container" - rax_files_objects: container=testcont method=put src=~/Downloads/testcont/file1 - - - name: "Upload one file to test container with metadata" - rax_files_objects: - container: testcont - src: ~/Downloads/testcont/file2 - method: put - meta: - testkey: testdata - who_uploaded_this: someuser@example.com - - - name: "Upload one file to test container with TTL of 60 seconds" - rax_files_objects: container=testcont method=put src=~/Downloads/testcont/file3 expires=60 - - - name: "Attempt to get remote object that does not exist" - rax_files_objects: container=testcont method=get src=FileThatDoesNotExist.jpg dest=~/Downloads/testcont - ignore_errors: yes - - - name: "Attempt to delete remote object that does not exist" - rax_files_objects: container=testcont method=delete dest=FileThatDoesNotExist.jpg - ignore_errors: yes - -- name: "Test Cloud Files Objects Metadata" - hosts: local - gather_facts: false - tasks: - - name: "Get metadata on one object" - rax_files_objects: container=testcont type=meta dest=file2 - - - name: "Get metadata on several objects" - rax_files_objects: container=testcont type=meta src=file2,file1 - - - name: "Set metadata on an object" - rax_files_objects: - container: testcont - type: meta - dest: file17 - method: put - meta: - key1: value1 - key2: value2 - clear_meta: true - - - name: "Verify metadata is set" - rax_files_objects: container=testcont type=meta src=file17 - - - name: "Delete metadata" - rax_files_objects: - container: testcont - type: meta - dest: file17 - method: delete - meta: - key1: '' - key2: '' - - - name: "Get metadata on all objects" - rax_files_objects: container=testcont type=meta -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -EXIT_DICT = dict(success=False) -META_PREFIX = 'x-object-meta-' - - -def _get_container(module, cf, container): - try: - return cf.get_container(container) - except pyrax.exc.NoSuchContainer, e: - module.fail_json(msg=e.message) - - -def upload(module, cf, container, src, dest, meta, expires): - """ Uploads a single object or a folder to 
Cloud Files. Optionally sets - metadata, a TTL value (expires), or Content-Disposition and Content-Encoding - headers. - """ - c = _get_container(module, cf, container) - - num_objs_before = len(c.get_object_names()) - - if not src: - module.fail_json(msg='src must be specified when uploading') - - src = os.path.abspath(os.path.expanduser(src)) - is_dir = os.path.isdir(src) - - if not is_dir and not os.path.isfile(src) or not os.path.exists(src): - module.fail_json(msg='src must be a file or a directory') - if dest and is_dir: - module.fail_json(msg='dest cannot be set when whole ' - 'directories are uploaded') - - cont_obj = None - if dest and not is_dir: - try: - cont_obj = c.upload_file(src, obj_name=dest, ttl=expires) - except Exception, e: - module.fail_json(msg=e.message) - elif is_dir: - try: - id, total_bytes = cf.upload_folder(src, container=c.name, ttl=expires) - except Exception, e: - module.fail_json(msg=e.message) - - while True: - bytes = cf.get_uploaded(id) - if bytes == total_bytes: - break - time.sleep(1) - else: - try: - cont_obj = c.upload_file(src, ttl=expires) - except Exception, e: - module.fail_json(msg=e.message) - - num_objs_after = len(c.get_object_names()) - - if not meta: - meta = dict() - - meta_result = dict() - if meta: - if cont_obj: - meta_result = cont_obj.set_metadata(meta) - else: - def _set_meta(objs, meta): - """ Sets metadata on a list of objects specified by name """ - for obj in objs: - try: - result = c.get_object(obj).set_metadata(meta) - except Exception, e: - module.fail_json(msg=e.message) - else: - meta_result[obj] = result - return meta_result - - def _walker(objs, path, filenames): - """ Callback func for os.path.walk """ - prefix = '' - if path != src: - prefix = path.split(src)[-1].lstrip('/') - filenames = [os.path.join(prefix, name) for name in filenames - if not os.path.isdir(name)] - objs += filenames - - _objs = [] - os.path.walk(src, _walker, _objs) - meta_result = _set_meta(_objs, meta) - - EXIT_DICT['success'] = True - EXIT_DICT['container'] = c.name - EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name) - if cont_obj or locals().get('bytes'): - EXIT_DICT['changed'] = True - if meta_result: - EXIT_DICT['meta'] = dict(updated=True) - - if cont_obj: - EXIT_DICT['bytes'] = cont_obj.total_bytes - EXIT_DICT['etag'] = cont_obj.etag - else: - EXIT_DICT['bytes'] = total_bytes - - module.exit_json(**EXIT_DICT) - - -def download(module, cf, container, src, dest, structure): - """ Download objects from Cloud Files to a local path specified by "dest". - Optionally disable maintaining a directory structure by passing a - false value to "structure". 
- """ - # Looking for an explicit destination - if not dest: - module.fail_json(msg='dest is a required argument when ' - 'downloading from Cloud Files') - - # Attempt to fetch the container by name - c = _get_container(module, cf, container) - - # Accept a single object name or a comma-separated list of objs - # If not specified, get the entire container - if src: - objs = src.split(',') - objs = map(str.strip, objs) - else: - objs = c.get_object_names() - - dest = os.path.abspath(os.path.expanduser(dest)) - is_dir = os.path.isdir(dest) - - if not is_dir: - module.fail_json(msg='dest must be a directory') - - results = [] - for obj in objs: - try: - c.download_object(obj, dest, structure=structure) - except Exception, e: - module.fail_json(msg=e.message) - else: - results.append(obj) - - len_results = len(results) - len_objs = len(objs) - - EXIT_DICT['container'] = c.name - EXIT_DICT['requested_downloaded'] = results - if results: - EXIT_DICT['changed'] = True - if len_results == len_objs: - EXIT_DICT['success'] = True - EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest) - else: - EXIT_DICT['msg'] = "Error: only %s of %s objects were " \ - "downloaded" % (len_results, len_objs) - module.exit_json(**EXIT_DICT) - - -def delete(module, cf, container, src, dest): - """ Delete specific objects by proving a single file name or a - comma-separated list to src OR dest (but not both). Omitting file name(s) - assumes the entire container is to be deleted. - """ - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to be deleted " - "have been specified on both src and dest args") - elif dest: - objs = dest - else: - objs = src - - c = _get_container(module, cf, container) - - if objs: - objs = objs.split(',') - objs = map(str.strip, objs) - else: - objs = c.get_object_names() - - num_objs = len(objs) - - results = [] - for obj in objs: - try: - result = c.delete_object(obj) - except Exception, e: - module.fail_json(msg=e.message) - else: - results.append(result) - - num_deleted = results.count(True) - - EXIT_DICT['container'] = c.name - EXIT_DICT['deleted'] = num_deleted - EXIT_DICT['requested_deleted'] = objs - - if num_deleted: - EXIT_DICT['changed'] = True - - if num_objs == num_deleted: - EXIT_DICT['success'] = True - EXIT_DICT['msg'] = "%s objects deleted" % num_deleted - else: - EXIT_DICT['msg'] = ("Error: only %s of %s objects " - "deleted" % (num_deleted, num_objs)) - module.exit_json(**EXIT_DICT) - - -def get_meta(module, cf, container, src, dest): - """ Get metadata for a single file, comma-separated list, or entire - container - """ - c = _get_container(module, cf, container) - - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to be deleted " - "have been specified on both src and dest args") - elif dest: - objs = dest - else: - objs = src - - if objs: - objs = objs.split(',') - objs = map(str.strip, objs) - else: - objs = c.get_object_names() - - results = dict() - for obj in objs: - try: - meta = c.get_object(obj).get_metadata() - except Exception, e: - module.fail_json(msg=e.message) - else: - results[obj] = dict() - for k, v in meta.items(): - meta_key = k.split(META_PREFIX)[-1] - results[obj][meta_key] = v - - EXIT_DICT['container'] = c.name - if results: - EXIT_DICT['meta_results'] = results - EXIT_DICT['success'] = True - module.exit_json(**EXIT_DICT) - - -def put_meta(module, cf, container, src, dest, meta, clear_meta): - """ Set metadata on a container, single file, or 
comma-separated list. - Passing a true value to clear_meta clears the metadata stored in Cloud - Files before setting the new metadata to the value of "meta". - """ - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to set meta" - " have been specified on both src and dest args") - elif dest: - objs = dest - else: - objs = src - - objs = objs.split(',') - objs = map(str.strip, objs) - - c = _get_container(module, cf, container) - - results = [] - for obj in objs: - try: - result = c.get_object(obj).set_metadata(meta, clear=clear_meta) - except Exception, e: - module.fail_json(msg=e.message) - else: - results.append(result) - - EXIT_DICT['container'] = c.name - EXIT_DICT['success'] = True - if results: - EXIT_DICT['changed'] = True - EXIT_DICT['num_changed'] = True - module.exit_json(**EXIT_DICT) - - -def delete_meta(module, cf, container, src, dest, meta): - """ Removes metadata keys and values specified in meta, if any. Deletes on - all objects specified by src or dest (but not both), if any; otherwise it - deletes keys on all objects in the container - """ - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; meta keys to be " - "deleted have been specified on both src and dest" - " args") - elif dest: - objs = dest - else: - objs = src - - objs = objs.split(',') - objs = map(str.strip, objs) - - c = _get_container(module, cf, container) - - results = [] # Num of metadata keys removed, not objects affected - for obj in objs: - if meta: - for k, v in meta.items(): - try: - result = c.get_object(obj).remove_metadata_key(k) - except Exception, e: - module.fail_json(msg=e.message) - else: - results.append(result) - else: - try: - o = c.get_object(obj) - except pyrax.exc.NoSuchObject, e: - module.fail_json(msg=e.message) - - for k, v in o.get_metadata().items(): - try: - result = o.remove_metadata_key(k) - except Exception, e: - module.fail_json(msg=e.message) - results.append(result) - - EXIT_DICT['container'] = c.name - EXIT_DICT['success'] = True - if results: - EXIT_DICT['changed'] = True - EXIT_DICT['num_deleted'] = len(results) - module.exit_json(**EXIT_DICT) - - -def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, - structure, expires): - """ Dispatch from here to work with metadata or file objects """ - cf = pyrax.cloudfiles - - if cf is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if typ == "file": - if method == 'put': - upload(module, cf, container, src, dest, meta, expires) - - elif method == 'get': - download(module, cf, container, src, dest, structure) - - elif method == 'delete': - delete(module, cf, container, src, dest) - - else: - if method == 'get': - get_meta(module, cf, container, src, dest) - - if method == 'put': - put_meta(module, cf, container, src, dest, meta, clear_meta) - - if method == 'delete': - delete_meta(module, cf, container, src, dest, meta) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - container=dict(required=True), - src=dict(), - dest=dict(), - method=dict(default='get', choices=['put', 'get', 'delete']), - type=dict(default='file', choices=['file', 'meta']), - meta=dict(type='dict', default=dict()), - clear_meta=dict(default=False, type='bool'), - structure=dict(default=True, type='bool'), - expires=dict(type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - container = module.params.get('container') - src = module.params.get('src') - dest = module.params.get('dest') - method = module.params.get('method') - typ = module.params.get('type') - meta = module.params.get('meta') - clear_meta = module.params.get('clear_meta') - structure = module.params.get('structure') - expires = module.params.get('expires') - - if clear_meta and not typ == 'meta': - module.fail_json(msg='clear_meta can only be used when setting metadata') - - setup_rax_module(module, pyrax) - cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires) - - -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -main() diff --git a/library/cloud/rax_identity b/library/cloud/rax_identity deleted file mode 100644 index ea40ea2ef46..00000000000 --- a/library/cloud/rax_identity +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_identity -short_description: Load Rackspace Cloud Identity -description: - - Verifies Rackspace Cloud credentials and returns identity information -version_added: "1.5" -options: - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -author: Christopher H. 
Laco, Matt Martz -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Load Rackspace Cloud Identity - gather_facts: False - hosts: local - connection: local - tasks: - - name: Load Identity - local_action: - module: rax_identity - credentials: ~/.raxpub - region: DFW - register: rackspace_identity -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def cloud_identity(module, state, identity): - for arg in (state, identity): - if not arg: - module.fail_json(msg='%s is required for rax_identity' % arg) - - instance = dict( - authenticated=identity.authenticated, - credentials=identity._creds_file - ) - changed = False - - instance.update(rax_to_dict(identity)) - instance['services'] = instance.get('services', {}).keys() - - if state == 'present': - if not identity.authenticated: - module.fail_json(msg='Credentials could not be verified!') - - module.exit_json(changed=changed, identity=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - if pyrax.identity is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - cloud_identity(module, state, pyrax.identity) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_keypair b/library/cloud/rax_keypair deleted file mode 100644 index 591ad8c3597..00000000000 --- a/library/cloud/rax_keypair +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_keypair -short_description: Create a keypair for use with Rackspace Cloud Servers -description: - - Create a keypair for use with Rackspace Cloud Servers -version_added: 1.5 -options: - name: - description: - - Name of keypair - required: true - public_key: - description: - - Public Key string to upload. Can be a file path or string - default: null - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: Matt Martz -notes: - - Keypairs cannot be manipulated, only created and deleted. To "update" a - keypair you must first delete and then recreate. 
- - The ability to specify a file path for the public key was added in 1.7 -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Create a keypair - hosts: localhost - gather_facts: False - tasks: - - name: keypair request - local_action: - module: rax_keypair - credentials: ~/.raxpub - name: my_keypair - region: DFW - register: keypair - - name: Create local public key - local_action: - module: copy - content: "{{ keypair.keypair.public_key }}" - dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub" - - name: Create local private key - local_action: - module: copy - content: "{{ keypair.keypair.private_key }}" - dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}" - -- name: Create a keypair - hosts: localhost - gather_facts: False - tasks: - - name: keypair request - local_action: - module: rax_keypair - credentials: ~/.raxpub - name: my_keypair - public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}" - region: DFW - register: keypair -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def rax_keypair(module, name, public_key, state): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - keypair = {} - - if state == 'present': - if os.path.isfile(public_key): - try: - f = open(public_key) - public_key = f.read() - f.close() - except Exception, e: - module.fail_json(msg='Failed to load %s' % public_key) - - try: - keypair = cs.keypairs.find(name=name) - except cs.exceptions.NotFound: - try: - keypair = cs.keypairs.create(name, public_key) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - keypair = cs.keypairs.find(name=name) - except: - pass - - if keypair: - try: - keypair.delete() - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, keypair=rax_to_dict(keypair)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(), - public_key=dict(), - state=dict(default='present', choices=['absent', 'present']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - public_key = module.params.get('public_key') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - rax_keypair(module, name, public_key, state) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_meta b/library/cloud/rax_meta deleted file mode 100644 index 2e1d90f5389..00000000000 --- a/library/cloud/rax_meta +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
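Since keypairs cannot be updated in place (per the rax_keypair note above), rotating a key is a delete followed by a create. A minimal sketch of that sequence, reusing the same pyrax calls the module itself makes; the helper name is illustrative, and an authenticated pyrax session (as produced by setup_rax_module) is assumed:

    import pyrax

    def recreate_keypair(name, public_key):
        # Keypairs are immutable: drop any existing keypair with this
        # name, then upload the new public key under the same name.
        cs = pyrax.cloudservers
        try:
            cs.keypairs.find(name=name).delete()
        except cs.exceptions.NotFound:
            pass  # nothing to remove; this is the first upload
        return cs.keypairs.create(name, public_key)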
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_meta -short_description: Manipulate metadata for Rackspace Cloud Servers -description: - - Manipulate metadata for Rackspace Cloud Servers -version_added: 1.7 -options: - address: - description: - - Server IP address to modify metadata for, will match any IP assigned to - the server - id: - description: - - Server ID to modify metadata for - name: - description: - - Server name to modify metadata for - default: null - meta: - description: - - A hash of metadata to associate with the instance - default: null -author: Matt Martz -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Set metadata for a server - hosts: all - gather_facts: False - tasks: - - name: Set metadata - local_action: - module: rax_meta - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW - meta: - group: primary_group - groups: - - group_two - - group_three - app: my_app - - - name: Clear metadata - local_action: - module: rax_meta - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def rax_meta(module, address, name, server_id, meta): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - search_opts = {} - if name: - search_opts = dict(name='^%s$' % name) - try: - servers = cs.servers.list(search_opts=search_opts) - except Exception, e: - module.fail_json(msg='%s' % e.message) - elif address: - servers = [] - try: - for server in cs.servers.list(): - for addresses in server.networks.values(): - if address in addresses: - servers.append(server) - break - except Exception, e: - module.fail_json(msg='%s' % e.message) - elif server_id: - servers = [] - try: - servers.append(cs.servers.get(server_id)) - except Exception, e: - pass - - if len(servers) > 1: - module.fail_json(msg='Multiple servers found matching provided ' - 'search parameters') - elif not servers: - module.fail_json(msg='Failed to find a server matching provided ' - 'search parameters') - - # Normalize and ensure all metadata values are strings - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, basestring): - meta[k] = '%s' % v - - server = servers[0] - if server.metadata == meta: - changed = False - else: - changed = True - removed = set(server.metadata.keys()).difference(meta.keys()) - cs.servers.delete_meta(server, list(removed)) - cs.servers.set_meta(server, meta) - server.get() - - module.exit_json(changed=changed, meta=server.metadata) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - id=dict(), - name=dict(), - meta=dict(type='dict', default=dict()), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[['address', 'id', 'name']], - required_one_of=[['address', 'id', 'name']], - ) - - if not HAS_PYRAX: - 
module.fail_json(msg='pyrax is required for this module') - - address = module.params.get('address') - server_id = module.params.get('id') - name = module.params.get('name') - meta = module.params.get('meta') - - setup_rax_module(module, pyrax) - - rax_meta(module, address, name, server_id, meta) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_network b/library/cloud/rax_network deleted file mode 100644 index bc4745a7a84..00000000000 --- a/library/cloud/rax_network +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_network -short_description: create / delete an isolated network in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud isolated network. -version_added: "1.4" -options: - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - label: - description: - - Label (name) to give the network - default: null - cidr: - description: - - cidr of the network being created - default: null -author: Christopher H. Laco, Jesse Keating -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Build an Isolated Network - gather_facts: False - - tasks: - - name: Network create request - local_action: - module: rax_network - credentials: ~/.raxpub - label: my-net - cidr: 192.168.3.0/24 - state: present -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def cloud_network(module, state, label, cidr): - for arg in (state, label, cidr): - if not arg: - module.fail_json(msg='%s is required for cloud_networks' % arg) - - changed = False - network = None - networks = [] - - if not pyrax.cloud_networks: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - try: - network = pyrax.cloud_networks.find_network_by_label(label) - except pyrax.exceptions.NetworkNotFound: - try: - network = pyrax.cloud_networks.create(label, cidr=cidr) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - network = pyrax.cloud_networks.find_network_by_label(label) - network.delete() - changed = True - except pyrax.exceptions.NetworkNotFound: - pass - except Exception, e: - module.fail_json(msg='%s' % e.message) - - if network: - instance = dict(id=network.id, - label=network.label, - cidr=network.cidr) - networks.append(instance) - - module.exit_json(changed=changed, networks=networks) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', - choices=['present', 'absent']), - label=dict(), - cidr=dict() - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - label = module.params.get('label') - cidr = module.params.get('cidr') - - setup_rax_module(module, pyrax) - - cloud_network(module, state, label, cidr) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_queue b/library/cloud/rax_queue deleted file mode 100644 index d3e5ac3f81e..00000000000 --- a/library/cloud/rax_queue +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_queue -short_description: create / delete a queue in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud queue. -version_added: "1.5" -options: - name: - description: - - Name to give the queue - default: null - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: Christopher H. 
Laco, Matt Martz -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: Build a Queue - gather_facts: False - hosts: local - connection: local - tasks: - - name: Queue create request - local_action: - module: rax_queue - credentials: ~/.raxpub - name: my-queue - region: DFW - state: present - register: my_queue -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def cloud_queue(module, state, name): - for arg in (state, name): - if not arg: - module.fail_json(msg='%s is required for rax_queue' % arg) - - changed = False - queues = [] - instance = {} - - cq = pyrax.queues - if not cq: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - for queue in cq.list(): - if name != queue.name: - continue - - queues.append(queue) - - if len(queues) > 1: - module.fail_json(msg='Multiple Queues were matched by name') - - if state == 'present': - if not queues: - try: - queue = cq.create(name) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - queue = queues[0] - - instance = dict(name=queue.name) - result = dict(changed=changed, queue=instance) - module.exit_json(**result) - - elif state == 'absent': - if queues: - queue = queues[0] - try: - queue.delete() - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, queue=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(), - state=dict(default='present', choices=['present', 'absent']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - cloud_queue(module, state, name) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_scaling_group b/library/cloud/rax_scaling_group deleted file mode 100644 index d884d3c1303..00000000000 --- a/library/cloud/rax_scaling_group +++ /dev/null @@ -1,351 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_scaling_group -short_description: Manipulate Rackspace Cloud Autoscale Groups -description: - - Manipulate Rackspace Cloud Autoscale Groups -version_added: 1.7 -options: - cooldown: - description: - - The period of time, in seconds, that must pass before any scaling can - occur after the previous scaling. 
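The create/delete logic in rax_queue above is a compact ensure-present/ensure-absent pattern: list the queues, match on name, and only create or delete when needed. The same idempotent check, condensed into a standalone sketch (function name illustrative; authenticated pyrax session assumed):

    import pyrax

    def ensure_queue(name, state='present'):
        # Mirror the module's scan of cq.list(), matching on queue name.
        cq = pyrax.queues
        existing = [q for q in cq.list() if q.name == name]
        if state == 'present':
            # Reuse the existing queue if found, otherwise create it.
            return existing[0] if existing else cq.create(name)
        if state == 'absent' and existing:
            existing[0].delete()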
Must be an integer between 0 and - 86400 (24 hrs). - disk_config: - description: - - Disk partitioning strategy - choices: - - auto - - manual - default: auto - files: - description: - - 'Files to insert into the instance. Hash of C(remotepath: localpath)' - default: null - flavor: - description: - - flavor to use for the instance - required: true - image: - description: - - image to use for the instance. Can be an C(id), C(human_id) or C(name) - required: true - key_name: - description: - - key pair to use on the instance - default: null - loadbalancers: - description: - - List of load balancer C(id) and C(port) hashes - max_entities: - description: - - The maximum number of entities that are allowed in the scaling group. - Must be an integer between 0 and 1000. - required: true - meta: - description: - - A hash of metadata to associate with the instance - default: null - min_entities: - description: - - The minimum number of entities that are allowed in the scaling group. - Must be an integer between 0 and 1000. - required: true - name: - description: - - Name to give the scaling group - required: true - networks: - description: - - The network to attach to the instances. If specified, you must include - ALL networks including the public and private interfaces. Can be C(id) - or C(label). - default: - - public - - private - server_name: - description: - - The base name for servers created by Autoscale - required: true - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: Matt Martz -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' ---- -- hosts: localhost - gather_facts: false - connection: local - tasks: - - rax_scaling_group: - credentials: ~/.raxpub - region: ORD - cooldown: 300 - flavor: performance1-1 - image: bb02b1a3-bc77-4d17-ab5b-421d89850fca - min_entities: 5 - max_entities: 10 - name: ASG Test - server_name: asgtest - loadbalancers: - - id: 228385 - port: 80 - register: asg -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, - image=None, key_name=None, loadbalancers=[], meta={}, - min_entities=0, max_entities=0, name=None, networks=[], - server_name=None, state='present'): - changed = False - - au = pyrax.autoscale - cnw = pyrax.cloud_networks - cs = pyrax.cloudservers - if not au or not cnw or not cs: - module.fail_json(msg='Failed to instantiate clients. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - # Normalize and ensure all metadata values are strings - if meta: - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, basestring): - meta[k] = '%s' % v - - if image: - image = rax_find_image(module, pyrax, image) - - nics = [] - if networks: - for network in networks: - nics.extend(rax_find_network(module, pyrax, network)) - - for nic in nics: - # pyrax is currently returning net-id, but we need uuid - # this check makes this forward compatible for a time when - # pyrax uses uuid instead - if nic.get('net-id'): - nic.update(uuid=nic['net-id']) - del nic['net-id'] - - # Handle the file contents - personality = [] - if files: - for rpath in files.keys(): - lpath = os.path.expanduser(files[rpath]) - try: - f = open(lpath, 'r') - personality.append({ - 'path': rpath, - 'contents': f.read() - }) - f.close() - except Exception, e: - module.fail_json(msg='Failed to load %s' % lpath) - - lbs = [] - if loadbalancers: - for lb in loadbalancers: - lb_id = lb.get('id') - port = lb.get('port') - if not lb_id or not port: - continue - lbs.append((lb_id, port)) - - try: - sg = au.find(name=name) - except pyrax.exceptions.NoUniqueMatch, e: - module.fail_json(msg='%s' % e.message) - except pyrax.exceptions.NotFound: - try: - sg = au.create(name, cooldown=cooldown, - min_entities=min_entities, - max_entities=max_entities, - launch_config_type='launch_server', - server_name=server_name, image=image, - flavor=flavor, disk_config=disk_config, - metadata=meta, personality=files, - networks=nics, load_balancers=lbs, - key_name=key_name) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - if not changed: - # Scaling Group Updates - group_args = {} - if cooldown != sg.cooldown: - group_args['cooldown'] = cooldown - - if min_entities != sg.min_entities: - group_args['min_entities'] = min_entities - - if max_entities != sg.max_entities: - group_args['max_entities'] = max_entities - - if group_args: - changed = True - sg.update(**group_args) - - # Launch Configuration Updates - lc = sg.get_launch_config() - lc_args = {} - if server_name != lc.get('name'): - lc_args['name'] = server_name - - if image != lc.get('image'): - lc_args['image'] = image - - if flavor != lc.get('flavor'): - lc_args['flavor'] = flavor - - if disk_config != lc.get('disk_config'): - lc_args['disk_config'] = disk_config - - if meta != lc.get('metadata'): - lc_args['metadata'] = meta - - if files != lc.get('personality'): - lc_args['personality'] = files - - if nics != lc.get('networks'): - lc_args['networks'] = nics - - if lbs != lc.get('load_balancers'): - # Work around for https://github.com/rackspace/pyrax/pull/393 - lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs) - - if key_name != lc.get('key_name'): - lc_args['key_name'] = key_name - - if lc_args: - # Work around for https://github.com/rackspace/pyrax/pull/389 - if 'flavor' not in lc_args: - lc_args['flavor'] = lc.get('flavor') - changed = True - sg.update_launch_config(**lc_args) - - sg.get() - - module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) - - else: - try: - sg = au.find(name=name) - sg.delete() - changed = True - except pyrax.exceptions.NotFound, e: - sg = {} - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, 
autoscale_group=rax_to_dict(sg)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - cooldown=dict(type='int', default=300), - disk_config=dict(choices=['auto', 'manual']), - files=dict(type='list', default=[]), - flavor=dict(required=True), - image=dict(required=True), - key_name=dict(), - loadbalancers=dict(type='list'), - meta=dict(type='dict', default={}), - min_entities=dict(type='int', required=True), - max_entities=dict(type='int', required=True), - name=dict(required=True), - networks=dict(type='list', default=['public', 'private']), - server_name=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - cooldown = module.params.get('cooldown') - disk_config = module.params.get('disk_config') - if disk_config: - disk_config = disk_config.upper() - files = module.params.get('files') - flavor = module.params.get('flavor') - image = module.params.get('image') - key_name = module.params.get('key_name') - loadbalancers = module.params.get('loadbalancers') - meta = module.params.get('meta') - min_entities = module.params.get('min_entities') - max_entities = module.params.get('max_entities') - name = module.params.get('name') - networks = module.params.get('networks') - server_name = module.params.get('server_name') - state = module.params.get('state') - - if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000: - module.fail_json(msg='min_entities and max_entities must be an ' - 'integer between 0 and 1000') - - if not 0 <= cooldown <= 86400: - module.fail_json(msg='cooldown must be an integer between 0 and 86400') - - setup_rax_module(module, pyrax) - - rax_asg(module, cooldown=cooldown, disk_config=disk_config, - files=files, flavor=flavor, image=image, meta=meta, - key_name=key_name, loadbalancers=loadbalancers, - min_entities=min_entities, max_entities=max_entities, - name=name, networks=networks, server_name=server_name, - state=state) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -# invoke the module -main() diff --git a/library/cloud/rax_scaling_policy b/library/cloud/rax_scaling_policy deleted file mode 100644 index b3da82460d8..00000000000 --- a/library/cloud/rax_scaling_policy +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
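Both rax_meta and rax_scaling_group above normalize metadata with the same loop before calling the API, since the service stores only string values: lists become comma-joined strings, dicts are serialized to JSON, and anything else is coerced with string formatting. That shared loop, isolated as a standalone sketch (Python 2, matching the modules' use of basestring):

    import json

    def normalize_meta(meta):
        # Coerce every metadata value to a string, as both modules
        # do before set_meta / launch configuration updates.
        for k, v in meta.items():
            if isinstance(v, list):
                meta[k] = ','.join(['%s' % i for i in v])
            elif isinstance(v, dict):
                meta[k] = json.dumps(v)
            elif not isinstance(v, basestring):
                meta[k] = '%s' % v
        return meta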
- -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_scaling_policy -short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy -description: - - Manipulate Rackspace Cloud Autoscale Scaling Policy -version_added: 1.7 -options: - at: - description: - - The UTC time when this policy will be executed. The time must be - formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as - C(2013-05-19T08:07:08Z) - change: - description: - - The change, either as a number of servers or as a percentage, to make - in the scaling group. If this is a percentage, you must set - I(is_percent) to C(true) also. - cron: - description: - - The time when the policy will be executed, as a cron entry. For - example, if this is parameter is set to C(1 0 * * *) - cooldown: - description: - - The period of time, in seconds, that must pass before any scaling can - occur after the previous scaling. Must be an integer between 0 and - 86400 (24 hrs). - desired_capacity: - description: - - The desired server capacity of the scaling the group; that is, how - many servers should be in the scaling group. - is_percent: - description: - - Whether the value in I(change) is a percent value - default: false - name: - description: - - Name to give the policy - required: true - policy_type: - description: - - The type of policy that will be executed for the current release. - choices: - - webhook - - schedule - required: true - scaling_group: - description: - - Name of the scaling group that this policy will be added to - required: true - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: Matt Martz -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' ---- -- hosts: localhost - gather_facts: false - connection: local - tasks: - - rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - at: '2013-05-19T08:07:08Z' - change: 25 - cooldown: 300 - is_percent: true - name: ASG Test Policy - at - policy_type: schedule - scaling_group: ASG Test - register: asps_at - - - rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - cron: '1 0 * * *' - change: 25 - cooldown: 300 - is_percent: true - name: ASG Test Policy - cron - policy_type: schedule - scaling_group: ASG Test - register: asp_cron - - - rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - cooldown: 300 - desired_capacity: 5 - name: ASG Test Policy - webhook - policy_type: webhook - scaling_group: ASG Test - register: asp_webhook -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def rax_asp(module, at=None, change=0, cron=None, cooldown=300, - desired_capacity=0, is_percent=False, name=None, - policy_type=None, scaling_group=None, state='present'): - changed = False - - au = pyrax.autoscale - if not au: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - try: - UUID(scaling_group) - except ValueError: - try: - sg = au.find(name=scaling_group) - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - try: - sg = au.get(scaling_group) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - if state == 'present': - policies = filter(lambda p: name == p.name, sg.list_policies()) - if len(policies) > 1: - module.fail_json(msg='No unique policy match found by name') - if at: - args = dict(at=at) - elif cron: - args = dict(cron=cron) - else: - args = None - - if not policies: - try: - policy = sg.add_policy(name, policy_type=policy_type, - cooldown=cooldown, change=change, - is_percent=is_percent, - desired_capacity=desired_capacity, - args=args) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - else: - policy = policies[0] - kwargs = {} - if policy_type != policy.type: - kwargs['policy_type'] = policy_type - - if cooldown != policy.cooldown: - kwargs['cooldown'] = cooldown - - if hasattr(policy, 'change') and change != policy.change: - kwargs['change'] = change - - if hasattr(policy, 'changePercent') and is_percent is False: - kwargs['change'] = change - kwargs['is_percent'] = False - elif hasattr(policy, 'change') and is_percent is True: - kwargs['change'] = change - kwargs['is_percent'] = True - - if hasattr(policy, 'desiredCapacity') and change: - kwargs['change'] = change - elif ((hasattr(policy, 'change') or - hasattr(policy, 'changePercent')) and desired_capacity): - kwargs['desired_capacity'] = desired_capacity - - if hasattr(policy, 'args') and args != policy.args: - kwargs['args'] = args - - if kwargs: - policy.update(**kwargs) - changed = True - - policy.get() - - module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) - - else: - try: - policies = filter(lambda p: name == p.name, sg.list_policies()) - if len(policies) > 1: - module.fail_json(msg='No unique policy match found by name') - elif not policies: - policy = {} - else: - policy = policies[0] - policy.delete() - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - at=dict(), - change=dict(type='int'), - cron=dict(), - cooldown=dict(type='int', default=300), - desired_capacity=dict(type='int'), - is_percent=dict(type='bool', default=False), - name=dict(required=True), - policy_type=dict(required=True, choices=['webhook', 'schedule']), - scaling_group=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[ - ['cron', 'at'], - ['change', 'desired_capacity'], - ] - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - at = module.params.get('at') - change = module.params.get('change') - cron = module.params.get('cron') - cooldown = module.params.get('cooldown') - desired_capacity = module.params.get('desired_capacity') - is_percent = module.params.get('is_percent') - name = module.params.get('name') - policy_type = module.params.get('policy_type') - scaling_group = module.params.get('scaling_group') - state = module.params.get('state') - - if (at or cron) and policy_type == 'webhook': - module.fail_json(msg='policy_type=schedule is required for
a time ' - 'based policy') - - setup_rax_module(module, pyrax) - - rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown, - desired_capacity=desired_capacity, is_percent=is_percent, - name=name, policy_type=policy_type, scaling_group=scaling_group, - state=state) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -# invoke the module -main() diff --git a/library/cloud/rds b/library/cloud/rds deleted file mode 100644 index ba3f1e38d39..00000000000 --- a/library/cloud/rds +++ /dev/null @@ -1,650 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rds -version_added: "1.3" -short_description: create, delete, or modify an Amazon rds instance -description: - - Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. -options: - command: - description: - - Specifies the action to take. - required: true - default: null - aliases: [] - choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'restore' ] - instance_name: - description: - - Database instance identifier. - required: true - default: null - aliases: [] - source_instance: - description: - - Name of the database to replicate. Used only when command=replicate. - required: false - default: null - aliases: [] - db_engine: - description: - - The type of database. Used only when command=create. - required: false - default: null - aliases: [] - choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'] - size: - description: - - Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify. - required: false - default: null - aliases: [] - instance_type: - description: - - The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance. - required: false - default: null - aliases: [] - username: - description: - - Master database username. Used only when command=create. - required: false - default: null - aliases: [] - password: - description: - - Password for the master database username. Used only when command=create or command=modify. - required: false - default: null - aliases: [] - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: true - default: null - aliases: [ 'aws_region', 'ec2_region' ] - db_name: - description: - - Name of a database to create within the instance. If not specified then no database is created. 
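One idiom worth isolating from rax_scaling_policy above: the scaling_group parameter accepts either a group ID or a name, and the module disambiguates by first attempting to parse the value as a UUID. A sketch of that lookup (the UUID import is shown explicitly here, while the module pulls its helpers in via module_utils; authenticated pyrax session assumed):

    from uuid import UUID

    import pyrax

    def find_scaling_group(scaling_group):
        # If the value parses as a UUID, fetch the group by ID;
        # otherwise fall back to a name lookup, as the module does.
        au = pyrax.autoscale
        try:
            UUID(scaling_group)
        except ValueError:
            return au.find(name=scaling_group)
        return au.get(scaling_group)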
Used only when command=create. - required: false - default: null - aliases: [] - engine_version: - description: - - Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used. - required: false - default: null - aliases: [] - parameter_group: - description: - - Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify. - required: false - default: null - aliases: [] - license_model: - description: - - The license model for this DB instance. Used only when command=create or command=restore. - required: false - default: null - aliases: [] - choices: [ 'license-included', 'bring-your-own-license', 'general-public-license' ] - multi_zone: - description: - - Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify. - choices: [ "yes", "no" ] - required: false - default: null - aliases: [] - iops: - description: - - Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000. - required: false - default: null - aliases: [] - security_groups: - description: - - Comma separated list of one or more security groups. Used only when command=create or command=modify. - required: false - default: null - aliases: [] - vpc_security_groups: - description: - - Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify. - required: false - default: null - aliases: [] - port: - description: - - Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1443 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate. - required: false - default: null - aliases: [] - upgrade: - description: - - Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate. - required: false - default: no - choices: [ "yes", "no" ] - aliases: [] - option_group: - description: - - The name of the option group to use. If not specified then the default option group is used. Used only when command=create. - required: false - default: null - aliases: [] - maint_window: - description: - - "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify." - required: false - default: null - aliases: [] - backup_window: - description: - - Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify. - required: false - default: null - aliases: [] - backup_retention: - description: - - "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify." - required: false - default: null - aliases: [] - zone: - description: - - availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore. - required: false - default: null - aliases: ['aws_zone', 'ec2_zone'] - subnet: - description: - - VPC subnet group. If specified then a VPC instance is created. 
Used only when command=create. - required: false - default: null - aliases: [] - snapshot: - description: - - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. Used only when command=delete or command=snapshot. - required: false - default: null - aliases: [] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_access_key', 'access_key' ] - wait: - description: - - When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated. - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 - aliases: [] - apply_immediately: - description: - - Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window. - default: no - choices: [ "yes", "no" ] - aliases: [] - new_instance_name: - description: - - Name to rename an instance to. Used only when command=modify. - required: false - default: null - aliases: [] - version_added: 1.5 -requirements: [ "boto" ] -author: Bruce Pennypacker -''' - -EXAMPLES = ''' -# Basic mysql provisioning example -- rds: > - command=create - instance_name=new_database - db_engine=MySQL - size=10 - instance_type=db.m1.small - username=mysql_admin - password=1nsecure - -# Create a read-only replica and wait for it to become available -- rds: > - command=replicate - instance_name=new_database_replica - source_instance=new_database - wait=yes - wait_timeout=600 - -# Delete an instance, but create a snapshot before doing so -- rds: > - command=delete - instance_name=new_database - snapshot=new_database_snapshot - -# Get facts about an instance -- rds: > - command=facts - instance_name=new_database - register: new_database_facts - -# Rename an instance and wait for the change to take effect -- rds: > - command=modify - instance_name=new_database - new_instance_name=renamed_database - wait=yes - -''' - -import sys -import time - -try: - import boto.rds -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def get_current_resource(conn, resource, command): - # There will be exceptions but we want the calling code to handle them - if command == 'snapshot': - return conn.get_all_dbsnapshots(snapshot_id=resource)[0] - else: - return conn.get_all_dbinstances(resource)[0] - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore'], required=True), - instance_name = dict(required=True), - source_instance = dict(required=False), - db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False), - size = dict(required=False), - instance_type = dict(aliases=['type'], required=False), - username = dict(required=False), - password = dict(no_log=True, required=False), - db_name = dict(required=False), - engine_version = 
dict(required=False), - parameter_group = dict(required=False), - license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license'], required=False), - multi_zone = dict(type='bool', default=False), - iops = dict(required=False), - security_groups = dict(required=False), - vpc_security_groups = dict(type='list', required=False), - port = dict(required=False), - upgrade = dict(type='bool', default=False), - option_group = dict(required=False), - maint_window = dict(required=False), - backup_window = dict(required=False), - backup_retention = dict(required=False), - zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False), - subnet = dict(required=False), - wait = dict(type='bool', default=False), - wait_timeout = dict(default=300), - snapshot = dict(required=False), - apply_immediately = dict(type='bool', default=False), - new_instance_name = dict(required=False), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - command = module.params.get('command') - instance_name = module.params.get('instance_name') - source_instance = module.params.get('source_instance') - db_engine = module.params.get('db_engine') - size = module.params.get('size') - instance_type = module.params.get('instance_type') - username = module.params.get('username') - password = module.params.get('password') - db_name = module.params.get('db_name') - engine_version = module.params.get('engine_version') - parameter_group = module.params.get('parameter_group') - license_model = module.params.get('license_model') - multi_zone = module.params.get('multi_zone') - iops = module.params.get('iops') - security_groups = module.params.get('security_groups') - vpc_security_groups = module.params.get('vpc_security_groups') - port = module.params.get('port') - upgrade = module.params.get('upgrade') - option_group = module.params.get('option_group') - maint_window = module.params.get('maint_window') - subnet = module.params.get('subnet') - backup_window = module.params.get('backup_window') - backup_retention = module.params.get('backup_retention') - region = module.params.get('region') - zone = module.params.get('zone') - aws_secret_key = module.params.get('aws_secret_key') - aws_access_key = module.params.get('aws_access_key') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - snapshot = module.params.get('snapshot') - apply_immediately = module.params.get('apply_immediately') - new_instance_name = module.params.get('new_instance_name') - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: - module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION.")) - - # connect to the rds endpoint - try: - conn = connect_to_aws(boto.rds, region, **aws_connect_params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - def invalid_security_group_type(subnet): - if subnet: - return 'security_groups' - else: - return 'vpc_security_groups' - - # Package up the optional parameters - params = {} - - # Validate parameters for each command - if command == 'create': - required_vars = [ 'instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password' ] - invalid_vars = [ 'source_instance', 'snapshot', 'apply_immediately', 'new_instance_name' ] + [invalid_security_group_type(subnet)] - - elif command == 'replicate': - required_vars = [ 'instance_name', 'source_instance' ] - invalid_vars = [ 'db_engine', 'size', 'username', 
'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'backup_window', 'backup_retention', 'subnet', 'snapshot', 'apply_immediately', 'new_instance_name' ] - - elif command == 'delete': - required_vars = [ 'instance_name' ] - invalid_vars = [ 'db_engine', 'size', 'instance_type', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'backup_window', 'backup_retention', 'port', 'upgrade', 'subnet', 'zone', 'source_instance', 'apply_immediately', 'new_instance_name' ] - - elif command == 'facts': - required_vars = [ 'instance_name' ] - invalid_vars = [ 'db_engine', 'size', 'instance_type', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'backup_window', 'backup_retention', 'port', 'upgrade', 'subnet', 'zone', 'wait', 'source_instance', 'apply_immediately', 'new_instance_name' ] - - elif command == 'modify': - required_vars = [ 'instance_name' ] - if password: - params["master_password"] = password - invalid_vars = [ 'db_engine', 'username', 'db_name', 'engine_version', 'license_model', 'option_group', 'port', 'upgrade', 'subnet', 'zone', 'source_instance'] - - elif command == 'promote': - required_vars = [ 'instance_name' ] - invalid_vars = [ 'db_engine', 'size', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'subnet', 'source_instance', 'snapshot', 'apply_immediately', 'new_instance_name' ] - - elif command == 'snapshot': - required_vars = [ 'instance_name', 'snapshot' ] - invalid_vars = [ 'db_engine', 'size', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'subnet', 'source_instance', 'apply_immediately', 'new_instance_name' ] - - elif command == 'restore': - required_vars = [ 'instance_name', 'snapshot', 'instance_type' ] - invalid_vars = [ 'db_engine', 'db_name', 'username', 'password', 'engine_version', 'option_group', 'source_instance', 'apply_immediately', 'new_instance_name', 'vpc_security_groups', 'security_groups' ] - - for v in required_vars: - if not module.params.get(v): - module.fail_json(msg = str("Parameter %s required for %s command" % (v, command))) - - for v in invalid_vars: - if module.params.get(v): - module.fail_json(msg = str("Parameter %s invalid for %s command" % (v, command))) - - if db_engine: - params["engine"] = db_engine - - if port: - params["port"] = port - - if db_name: - params["db_name"] = db_name - - if parameter_group: - params["param_group"] = parameter_group - - if zone: - params["availability_zone"] = zone - - if maint_window: - params["preferred_maintenance_window"] = maint_window - - if backup_window: - params["preferred_backup_window"] = backup_window - - if backup_retention: - params["backup_retention_period"] = backup_retention - - if multi_zone: - params["multi_az"] = multi_zone - - if engine_version: - params["engine_version"] = engine_version - - if upgrade: - params["auto_minor_version_upgrade"] = upgrade - - if subnet: - params["db_subnet_group_name"] = subnet - - if license_model: -
params["license_model"] = license_model - - if option_group: - params["option_group_name"] = option_group - - if iops: - params["iops"] = iops - - if security_groups: - params["security_groups"] = security_groups.split(',') - - if vpc_security_groups: - groups_list = [] - for x in vpc_security_groups: - groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x)) - params["vpc_security_groups"] = groups_list - - if new_instance_name: - params["new_instance_id"] = new_instance_name - - changed = True - - if command in ['create', 'restore', 'facts']: - try: - result = conn.get_all_dbinstances(instance_name)[0] - changed = False - except boto.exception.BotoServerError, e: - try: - if command == 'create': - result = conn.create_dbinstance(instance_name, size, instance_type, username, password, **params) - if command == 'restore': - result = conn.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params) - if command == 'facts': - module.fail_json(msg = "DB Instance %s does not exist" % instance_name) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - if command == 'snapshot': - try: - result = conn.get_all_dbsnapshots(snapshot)[0] - changed = False - except boto.exception.BotoServerError, e: - try: - result = conn.create_dbsnapshot(snapshot, instance_name) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - if command == 'delete': - try: - result = conn.get_all_dbinstances(instance_name)[0] - if result.status == 'deleting': - module.exit_json(changed=False) - except boto.exception.BotoServerError, e: - module.exit_json(changed=False) - try: - if snapshot: - params["skip_final_snapshot"] = False - params["final_snapshot_id"] = snapshot - else: - params["skip_final_snapshot"] = True - result = conn.delete_dbinstance(instance_name, **params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - if command == 'replicate': - try: - if instance_type: - params["instance_class"] = instance_type - result = conn.create_dbinstance_read_replica(instance_name, source_instance, **params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - if command == 'modify': - try: - params["apply_immediately"] = apply_immediately - result = conn.modify_dbinstance(instance_name, **params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - if apply_immediately: - if new_instance_name: - # Wait until the new instance name is valid - found = 0 - while found == 0: - instances = conn.get_all_dbinstances() - for i in instances: - if i.id == new_instance_name: - instance_name = new_instance_name - found = 1 - if found == 0: - time.sleep(5) - - # The name of the database has now changed, so we have - # to force result to contain the new instance, otherwise - # the call below to get_current_resource will fail since it - # will be looking for the old instance name. 
- result.id = new_instance_name - else: - # Wait for a few seconds since it takes a while for AWS - # to change the instance from 'available' to 'modifying' - time.sleep(5) - - if command == 'promote': - try: - result = conn.promote_read_replica(instance_name, **params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - # If we're not waiting for a delete to complete then we're all done - # so just return - if command == 'delete' and not wait: - module.exit_json(changed=True) - - try: - resource = get_current_resource(conn, result.id, command) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - # Wait for the resource to be available if requested - if wait: - try: - wait_timeout = time.time() + wait_timeout - time.sleep(5) - - while wait_timeout > time.time() and resource.status != 'available': - time.sleep(5) - if wait_timeout <= time.time(): - module.fail_json(msg = "Timeout waiting for resource %s" % resource.id) - resource = get_current_resource(conn, result.id, command) - except boto.exception.BotoServerError, e: - # If we're waiting for an instance to be deleted then - # get_all_dbinstances will eventually throw a - # DBInstanceNotFound error. - if command == 'delete' and e.error_code == 'DBInstanceNotFound': - module.exit_json(changed=True) - else: - module.fail_json(msg = e.error_message) - - # If we got here then pack up all the instance details to send - # back to ansible - if command == 'snapshot': - d = { - 'id' : resource.id, - 'create_time' : resource.snapshot_create_time, - 'status' : resource.status, - 'availability_zone' : resource.availability_zone, - 'instance_id' : resource.instance_id, - 'instance_created' : resource.instance_create_time, - } - try: - d["snapshot_type"] = resource.snapshot_type - d["iops"] = resource.iops - except AttributeError, e: - pass # needs boto >= 2.21.0 - - return module.exit_json(changed=changed, snapshot=d) - - d = { - 'id' : resource.id, - 'create_time' : resource.create_time, - 'status' : resource.status, - 'availability_zone' : resource.availability_zone, - 'backup_retention' : resource.backup_retention_period, - 'backup_window' : resource.preferred_backup_window, - 'maintenance_window' : resource.preferred_maintenance_window, - 'multi_zone' : resource.multi_az, - 'instance_type' : resource.instance_class, - 'username' : resource.master_username, - 'iops' : resource.iops - } - - # Endpoint exists only if the instance is available - if resource.status == 'available' and command != 'snapshot': - d["endpoint"] = resource.endpoint[0] - d["port"] = resource.endpoint[1] - if resource.vpc_security_groups is not None: - d["vpc_security_groups"] = ','.join(x.vpc_group for x in resource.vpc_security_groups) - else: - d["vpc_security_groups"] = None - else: - d["endpoint"] = None - d["port"] = None - d["vpc_security_groups"] = None - - # ReadReplicaSourceDBInstanceIdentifier may or may not exist - try: - d["replication_source"] = resource.ReadReplicaSourceDBInstanceIdentifier - except Exception, e: - d["replication_source"] = None - - module.exit_json(changed=changed, instance=d) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/rds_param_group b/library/cloud/rds_param_group deleted file mode 100644 index 39f9432057a..00000000000 --- a/library/cloud/rds_param_group +++ /dev/null @@ -1,313 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: 
you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rds_param_group -version_added: "1.5" -short_description: manage RDS parameter groups -description: - - Creates, modifies, and deletes RDS parameter groups. This module has a dependency on python-boto >= 2.5. -options: - state: - description: - - Specifies whether the group should be present or absent. - required: true - default: present - aliases: [] - choices: [ 'present', 'absent' ] - name: - description: - - Database parameter group identifier. - required: true - default: null - aliases: [] - description: - description: - - Database parameter group description. Only set when a new group is added. - required: false - default: null - aliases: [] - engine: - description: - - The type of database for this group. Required for state=present. - required: false - default: null - aliases: [] - choices: [ 'mysql5.1', 'mysql5.5', 'mysql5.6', 'oracle-ee-11.2', 'oracle-se-11.2', 'oracle-se1-11.2', 'postgres9.3', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0'] - immediate: - description: - - Whether to apply the changes immediately, or after the next reboot of any associated instances. - required: false - default: null - aliases: [] - params: - description: - - Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3), or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group. - required: false - default: null - aliases: [] - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: true - default: null - aliases: [ 'aws_region', 'ec2_region' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_access_key', 'access_key' ] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
- required: false - default: null - aliases: [ 'ec2_secret_key', 'secret_key' ] -requirements: [ "boto" ] -author: Scott Anderson -''' - -EXAMPLES = ''' -# Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024 -- rds_param_group: > - state=present - name=norwegian_blue - description=My Fancy Ex Parrot Group - engine=mysql5.6 - params='{"auto_increment_increment": "42K"}' - -# Remove a parameter group -- rds_param_group: > - state=absent - name=norwegian_blue -''' - -import sys -import time - -VALID_ENGINES = [ - 'mysql5.1', - 'mysql5.5', - 'mysql5.6', - 'oracle-ee-11.2', - 'oracle-se-11.2', - 'oracle-se1-11.2', - 'postgres9.3', - 'sqlserver-ee-10.5', - 'sqlserver-ee-11.0', - 'sqlserver-ex-10.5', - 'sqlserver-ex-11.0', - 'sqlserver-se-10.5', - 'sqlserver-se-11.0', - 'sqlserver-web-10.5', - 'sqlserver-web-11.0', -] - -try: - import boto.rds - from boto.exception import BotoServerError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -class NotModifiableError(StandardError): - def __init__(self, error_message, *args): - super(NotModifiableError, self).__init__(error_message, *args) - self.error_message = error_message - - def __repr__(self): - return 'NotModifiableError: %s' % self.error_message - - def __str__(self): - return 'NotModifiableError: %s' % self.error_message - -INT_MODIFIERS = { - 'K': 1024, - 'M': pow(1024, 2), - 'G': pow(1024, 3), - 'T': pow(1024, 4), -} - -TRUE_VALUES = ('on', 'true', 'yes', '1',) - -def set_parameter(param, value, immediate): - """ - Allows setting parameters with 10M = 10 * 1024 * 1024 and so on. - """ - converted_value = value - - if param.type == 'string': - converted_value = str(value) - - elif param.type == 'integer': - if isinstance(value, basestring): - try: - for modifier in INT_MODIFIERS.keys(): - if value.endswith(modifier): - converted_value = int(value[:-1]) * INT_MODIFIERS[modifier] - converted_value = int(converted_value) - except ValueError: - # may be based on a variable (ie. {foo*3/4}) so - # just pass it on through to boto - converted_value = str(value) - elif type(value) == bool: - converted_value = 1 if value else 0 - else: - converted_value = int(value) - - elif param.type == 'boolean': - if isinstance(value, basestring): - converted_value = value in TRUE_VALUES - else: - converted_value = bool(value) - - param.value = converted_value - param.apply(immediate) - -def modify_group(group, params, immediate=False): - """ Set all of the params in a group to the provided new params. Raises NotModifiableError if any of the - params to be changed are read only. Returns a tuple: (dict of changed parameters, the remaining parameters that weren't found in this parameter group). - """ - changed = {} - - new_params = dict(params) - - for key in new_params.keys(): - if group.has_key(key): - param = group[key] - new_value = new_params[key] - - try: - old_value = param.value - except ValueError: - # some versions of boto have problems with retrieving - # integer values from params that may have their value - # based on a variable (ie. {foo*3/4}), so grab it in a - # way that bypasses the property functions - old_value = param._value - - if old_value != new_value: - if not param.is_modifiable: - raise NotModifiableError('Parameter %s is not modifiable.'
% key) - - changed[key] = {'old': param.value, 'new': new_value} - - set_parameter(param, new_value, immediate) - - del new_params[key] - - return changed, new_params - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - state = dict(required=True, choices=['present', 'absent']), - name = dict(required=True), - engine = dict(required=False, choices=VALID_ENGINES), - description = dict(required=False), - params = dict(required=False, aliases=['parameters'], type='dict'), - immediate = dict(required=False, type='bool'), - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - state = module.params.get('state') - group_name = module.params.get('name').lower() - group_engine = module.params.get('engine') - group_description = module.params.get('description') - group_params = module.params.get('params') or {} - immediate = module.params.get('immediate') or False - - if state == 'present': - for required in ['name', 'description', 'engine', 'params']: - if not module.params.get(required): - module.fail_json(msg = str("Parameter %s required for state='present'" % required)) - else: - for not_allowed in ['description', 'engine', 'params']: - if module.params.get(not_allowed): - module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed)) - - # Retrieve any AWS settings from the environment. - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - if not region: - module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION.")) - - try: - conn = boto.rds.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - group_was_added = False - - try: - changed = False - - try: - all_groups = conn.get_all_dbparameter_groups(group_name, max_records=100) - exists = len(all_groups) > 0 - except BotoServerError, e: - if e.error_code != 'DBParameterGroupNotFound': - module.fail_json(msg = e.error_message) - exists = False - - if state == 'absent': - if exists: - conn.delete_parameter_group(group_name) - changed = True - else: - changed = {} - if not exists: - new_group = conn.create_parameter_group(group_name, engine=group_engine, description=group_description) - group_was_added = True - - # If a "Marker" is present, this group has more attributes remaining to check. Get the next batch, but only - # if there are parameters left to set. - marker = None - while len(group_params): - next_group = conn.get_all_dbparameters(group_name, marker=marker) - - changed_params, group_params = modify_group(next_group, group_params, immediate) - changed.update(changed_params) - - if hasattr(next_group, 'Marker'): - marker = next_group.Marker - else: - break - - - except BotoServerError, e: - module.fail_json(msg = e.error_message) - - except NotModifiableError, e: - msg = e.error_message - if group_was_added: - msg = '%s The group "%s" was added first.' 
% (msg, group_name) - module.fail_json(msg=msg) - - module.exit_json(changed=changed) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/rds_subnet_group b/library/cloud/rds_subnet_group deleted file mode 100644 index 1688856719a..00000000000 --- a/library/cloud/rds_subnet_group +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' ---- -module: rds_subnet_group -version_added: "1.5" -short_description: manage RDS database subnet groups -description: - - Creates, modifies, and deletes RDS database subnet groups. This module has a dependency on python-boto >= 2.5. -options: - state: - description: - - Specifies whether the subnet group should be present or absent. - required: true - default: present - aliases: [] - choices: [ 'present' , 'absent' ] - name: - description: - - Database subnet group identifier. - required: true - default: null - aliases: [] - description: - description: - - Database subnet group description. Only set when a new group is added. - required: false - default: null - aliases: [] - subnets: - description: - - List of subnet IDs that make up the database subnet group. - required: false - default: null - aliases: [] - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: true - default: null - aliases: [ 'aws_region', 'ec2_region' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_access_key', 'access_key' ] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
- required: false - default: null - aliases: [ 'ec2_secret_key', 'secret_key' ] -requirements: [ "boto" ] -author: Scott Anderson -''' - -EXAMPLES = ''' -# Add or change a subnet group -- local_action: - module: rds_subnet_group - state: present - name: norwegian-blue - description: My Fancy Ex Parrot Subnet Group - subnets: - - subnet-aaaaaaaa - - subnet-bbbbbbbb - -# Remove a subnet group -- rds_subnet_group: > - state=absent - name=norwegian-blue -''' - -import sys -import time - -try: - import boto.rds - from boto.exception import BotoServerError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - state = dict(required=True, choices=['present', 'absent']), - name = dict(required=True), - description = dict(required=False), - subnets = dict(required=False, type='list'), - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - state = module.params.get('state') - group_name = module.params.get('name').lower() - group_description = module.params.get('description') - group_subnets = module.params.get('subnets') or [] - - if state == 'present': - for required in ['name', 'description', 'subnets']: - if not module.params.get(required): - module.fail_json(msg = str("Parameter %s required for state='present'" % required)) - else: - for not_allowed in ['description', 'subnets']: - if module.params.get(not_allowed): - module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed)) - - # Retrieve any AWS settings from the environment. - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - if not region: - module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION.")) - - try: - conn = boto.rds.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - try: - changed = False - exists = False - - try: - matching_groups = conn.get_all_db_subnet_groups(group_name, max_records=100) - exists = len(matching_groups) > 0 - except BotoServerError, e: - if e.error_code != 'DBSubnetGroupNotFoundFault': - module.fail_json(msg = e.error_message) - - if state == 'absent': - if exists: - conn.delete_db_subnet_group(group_name) - changed = True - else: - if not exists: - new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets) - changed = True - - else: - changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets) - changed = True - - except BotoServerError, e: - module.fail_json(msg = e.error_message) - - module.exit_json(changed=changed) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/route53 b/library/cloud/route53 deleted file mode 100644 index 0f511c23a79..00000000000 --- a/library/cloud/route53 +++ /dev/null @@ -1,281 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' ---- -module: route53 -version_added: "1.3" -short_description: add or delete entries in Amazon's Route53 DNS service -description: - - Creates and deletes DNS records in Amazon's Route53 service -options: - command: - description: - - Specifies the action to take. - required: true - default: null - aliases: [] - choices: [ 'get', 'create', 'delete' ] - zone: - description: - - The DNS zone to modify - required: true - default: null - aliases: [] - record: - description: - - The full DNS record to create or delete - required: true - default: null - aliases: [] - ttl: - description: - - The TTL to give the new record - required: false - default: 3600 (one hour) - aliases: [] - type: - description: - - The type of DNS record to create - required: true - default: null - aliases: [] - choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' ] - value: - description: - - The new value when creating a DNS record. Multiple comma-spaced values are allowed. When deleting a record all values for the record must be specified or Route53 will not delete it. - required: false - default: null - aliases: [] - aws_secret_key: - description: - - AWS secret key. - required: false - default: null - aliases: ['ec2_secret_key', 'secret_key'] - aws_access_key: - description: - - AWS access key. - required: false - default: null - aliases: ['ec2_access_key', 'access_key'] - overwrite: - description: - - Whether an existing record should be overwritten on create if values do not match - required: false - default: null - aliases: [] - retry_interval: - description: - - In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds. If you have many domain names, the default of 500 seconds may be too long. - required: false - default: 500 - aliases: [] -requirements: [ "boto" ] -author: Bruce Pennypacker -''' - -EXAMPLES = ''' -# Add new.foo.com as an A record with 3 IPs -- route53: > - command=create - zone=foo.com - record=new.foo.com - type=A - ttl=7200 - value=1.1.1.1,2.2.2.2,3.3.3.3 - -# Retrieve the details for new.foo.com -- route53: > - command=get - zone=foo.com - record=new.foo.com - type=A - register: rec - -# Delete new.foo.com A record using the results from the get command -- route53: > - command=delete - zone=foo.com - record={{ rec.set.record }} - type={{ rec.set.type }} - value={{ rec.set.value }} - -# Add an AAAA record. Note that because there are colons in the value -# that the entire parameter list must be quoted: -- route53: > - command=create - zone=foo.com - record=localhost.foo.com - type=AAAA - ttl=7200 - value="::1" - -# Add a TXT record.
Note that TXT and SPF records must be surrounded -# by quotes when sent to Route 53: -- route53: > - command=create - zone=foo.com - record=localhost.foo.com - type=TXT - ttl=7200 - value="\"bar\"" - - -''' - -import sys -import time - -try: - import boto - from boto import route53 - from boto.route53.record import ResourceRecordSets -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def commit(changes, retry_interval): - """Commit changes, but retry PriorRequestNotComplete errors.""" - retry = 10 - while True: - try: - retry -= 1 - return changes.commit() - except boto.route53.exception.DNSServerError, e: - code = e.body.split("<Code>")[1] - code = code.split("</Code>")[0] - if code != 'PriorRequestNotComplete' or retry < 0: - raise e - time.sleep(retry_interval) - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - command = dict(choices=['get', 'create', 'delete'], required=True), - zone = dict(required=True), - record = dict(required=True), - ttl = dict(required=False, default=3600), - type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), - value = dict(required=False), - overwrite = dict(required=False, type='bool'), - retry_interval = dict(required=False, default=500) - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - command_in = module.params.get('command') - zone_in = module.params.get('zone') - ttl_in = module.params.get('ttl') - record_in = module.params.get('record') - type_in = module.params.get('type') - value_in = module.params.get('value') - retry_interval_in = module.params.get('retry_interval') - - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - value_list = () - - if type(value_in) is str: - if value_in: - value_list = sorted(value_in.split(',')) - elif type(value_in) is list: - value_list = sorted(value_in) - - if zone_in[-1:] != '.': - zone_in += "." - - if record_in[-1:] != '.': - record_in += "." - - if command_in == 'create' or command_in == 'delete': - if not value_in: - module.fail_json(msg = "parameter 'value' required for create/delete") - - # connect to the route53 endpoint - try: - conn = boto.route53.connection.Route53Connection(aws_access_key, aws_secret_key) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - # Get all the existing hosted zones and save their IDs - zones = {} - results = conn.get_all_hosted_zones() - for r53zone in results['ListHostedZonesResponse']['HostedZones']: - zone_id = r53zone['Id'].replace('/hostedzone/', '') - zones[r53zone['Name']] = zone_id - - # Verify that the requested zone is already defined in Route53 - if not zone_in in zones: - errmsg = "Zone %s does not exist in Route53" % zone_in - module.fail_json(msg = errmsg) - - record = {} - - found_record = False - sets = conn.get_all_rrsets(zones[zone_in]) - for rset in sets: - # Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round - # tripping of things like * and @.
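# For example, a wildcard record set for "*.foo.com." comes back from the
# API as "\052.foo.com." and "@" as "\100"; the two replace() calls below
# undo exactly these two escapes (an illustrative note mirroring the code
# that follows, not extra logic).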
- decoded_name = rset.name.replace(r'\052', '*') - decoded_name = decoded_name.replace(r'\100', '@') - - if rset.type == type_in and decoded_name == record_in: - found_record = True - record['zone'] = zone_in - record['type'] = rset.type - record['record'] = decoded_name - record['ttl'] = rset.ttl - record['value'] = ','.join(sorted(rset.resource_records)) - record['values'] = sorted(rset.resource_records) - if value_list == sorted(rset.resource_records) and record['ttl'] == ttl_in and command_in == 'create': - module.exit_json(changed=False) - - if command_in == 'get': - module.exit_json(changed=False, set=record) - - if command_in == 'delete' and not found_record: - module.exit_json(changed=False) - - changes = ResourceRecordSets(conn, zones[zone_in]) - - if command_in == 'create' and found_record: - if not module.params['overwrite']: - module.fail_json(msg = "Record already exists with different value. Set 'overwrite' to replace it") - else: - change = changes.add_change("DELETE", record_in, type_in, record['ttl']) - for v in record['values']: - change.add_value(v) - - if command_in == 'create' or command_in == 'delete': - change = changes.add_change(command_in.upper(), record_in, type_in, ttl_in) - for v in value_list: - change.add_value(v) - - try: - result = commit(changes, retry_interval_in) - except boto.route53.exception.DNSServerError, e: - txt = e.body.split("<Message>")[1] - txt = txt.split("</Message>")[0] - module.fail_json(msg = txt) - - module.exit_json(changed=True) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/s3 b/library/cloud/s3 deleted file mode 100644 index 6438c6405e7..00000000000 --- a/library/cloud/s3 +++ /dev/null @@ -1,514 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' ---- -module: s3 -short_description: S3 module putting a file into S3. -description: - - This module allows the user to dictate the presence of a given file in an S3 bucket. If or once the key (file) exists in the bucket, it returns a time-expired download URL. This module has a dependency on python-boto. -version_added: "1.1" -options: - bucket: - description: - - Bucket name. - required: true - default: null - aliases: [] - object: - description: - - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. - required: false - default: null - aliases: [] - version_added: "1.3" - src: - description: - - The source file path when performing a PUT operation. - required: false - default: null - aliases: [] - version_added: "1.3" - dest: - description: - - The destination file path when downloading an object/key with a GET operation. - required: false - aliases: [] - version_added: "1.3" - overwrite: - description: - - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
- required: false - default: true - version_added: "1.2" - mode: - description: - - Switches the module behaviour between put (upload), get (download), geturl (return download URL, Ansible 1.3+), getstr (download object as string, 1.3+), create (bucket) and delete (bucket). - required: true - default: null - aliases: [] - expiration: - description: - - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation. - required: false - default: 600 - aliases: [] - s3_url: - description: - - "S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. Ansible tries to guess if fakes3 (https://github.com/jubos/fake-s3) or Eucalyptus Walrus (https://github.com/eucalyptus/eucalyptus/wiki/Walrus) is used and configures the connection accordingly. Current heuristic is: everything with scheme fakes3:// is fakes3, everything else not ending with amazonaws.com is Walrus." - default: null - aliases: [ S3_URL ] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: null - aliases: ['ec2_secret_key', 'secret_key'] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_access_key', 'access_key' ] - metadata: - description: - - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. - required: false - default: null - version_added: "1.6" - region: - description: - - "AWS region to create the bucket in. If not set then the value of the EC2_REGION and AWS_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect."
- required: false - default: null - version_added: "1.8" - -requirements: [ "boto" ] -author: Lester Wade, Ralph Tice -''' - -EXAMPLES = ''' -# Simple PUT operation -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put -# Simple GET operation -- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get -# GET/download and overwrite local file (trust remote) -- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get -# GET/download and do not overwrite local file (trust remote) -- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get force=false -# PUT/upload and overwrite remote file (trust local) -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put -# PUT/upload with metadata -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip' -# PUT/upload with multiple metadata -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache' -# PUT/upload and do not overwrite remote file (trust local) -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false -# Download an object as a string to use elsewhere in your playbook -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=getstr -# Create an empty bucket -- s3: bucket=mybucket mode=create -# Create a bucket with key as directory -- s3: bucket=mybucket object=/my/directory/path mode=create -# Create an empty bucket in the EU region -- s3: bucket=mybucket mode=create region=eu-west-1 -# Delete a bucket and all contents -- s3: bucket=mybucket mode=delete -''' - -import sys -import os -import urlparse -import hashlib - -try: - import boto - from boto.s3.connection import Location -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def key_check(module, s3, bucket, obj): - try: - bucket = s3.lookup(bucket) - key_check = bucket.get_key(obj) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if key_check: - return True - else: - return False - -def keysum(module, s3, bucket, obj): - bucket = s3.lookup(bucket) - key_check = bucket.get_key(obj) - if not key_check: - return None - md5_remote = key_check.etag[1:-1] - etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5 - if etag_multipart is True: - module.fail_json(msg="Files uploaded with S3 multipart upload are not supported with checksum; unable to compute checksum.") - return md5_remote - -def bucket_check(module, s3, bucket): - try: - result = s3.lookup(bucket) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if result: - return True - else: - return False - -def create_bucket(module, s3, bucket, location=Location.DEFAULT): - try: - bucket = s3.create_bucket(bucket, location=location) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if bucket: - return True - -def delete_bucket(module, s3, bucket): - try: - bucket = s3.lookup(bucket) - bucket_contents = bucket.list() - bucket.delete_keys([key.name for key in bucket_contents]) - bucket.delete() - return True - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def delete_key(module, s3, bucket, obj): - try: - bucket = s3.lookup(bucket) - bucket.delete_key(obj) - module.exit_json(msg="Object deleted from bucket %s"%bucket,
changed=True) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def create_dirkey(module, s3, bucket, obj): - try: - bucket = s3.lookup(bucket) - key = bucket.new_key(obj) - key.set_contents_from_string('') - module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def upload_file_check(src): - if os.path.exists(src): - file_exists = True - else: - file_exists = False - if os.path.isdir(src): - module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True) - return file_exists - -def path_check(path): - if os.path.exists(path): - return True - else: - return False - -def upload_s3file(module, s3, bucket, obj, src, expiry, metadata): - try: - bucket = s3.lookup(bucket) - key = bucket.new_key(obj) - if metadata: - for meta_key in metadata.keys(): - key.set_metadata(meta_key, metadata[meta_key]) - - key.set_contents_from_filename(src) - url = key.generate_url(expiry) - module.exit_json(msg="PUT operation complete", url=url, changed=True) - except s3.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def download_s3file(module, s3, bucket, obj, dest): - try: - bucket = s3.lookup(bucket) - key = bucket.lookup(obj) - key.get_contents_to_filename(dest) - module.exit_json(msg="GET operation complete", changed=True) - except s3.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def download_s3str(module, s3, bucket, obj): - try: - bucket = s3.lookup(bucket) - key = bucket.lookup(obj) - contents = key.get_contents_as_string() - module.exit_json(msg="GET operation complete", contents=contents, changed=True) - except s3.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def get_download_url(module, s3, bucket, obj, expiry, changed=True): - try: - bucket = s3.lookup(bucket) - key = bucket.lookup(obj) - url = key.generate_url(expiry) - module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def is_fakes3(s3_url): - """ Return True if s3_url has scheme fakes3:// """ - if s3_url is not None: - return urlparse.urlparse(s3_url).scheme == 'fakes3' - else: - return False - -def is_walrus(s3_url): - """ Return True if it's Walrus endpoint, not S3 - - We assume anything other than *.amazonaws.com is Walrus""" - if s3_url is not None: - o = urlparse.urlparse(s3_url) - return not o.hostname.endswith('amazonaws.com') - else: - return False - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - bucket = dict(required=True), - object = dict(), - src = dict(), - dest = dict(default=None), - mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr'], required=True), - expiry = dict(default=600, aliases=['expiration']), - s3_url = dict(aliases=['S3_URL']), - overwrite = dict(aliases=['force'], default=True, type='bool'), - metadata = dict(type='dict'), - ), - ) - module = AnsibleModule(argument_spec=argument_spec) - - bucket = module.params.get('bucket') - obj = module.params.get('object') - src = module.params.get('src') - if module.params.get('dest'): - dest = os.path.expanduser(module.params.get('dest')) - mode = module.params.get('mode') - expiry = int(module.params['expiry']) - s3_url = module.params.get('s3_url') - overwrite = module.params.get('overwrite') - metadata = module.params.get('metadata') - - ec2_url, aws_access_key,
aws_secret_key, region = get_ec2_creds(module) - - if region in ('us-east-1', '', None): - # S3ism for the US Standard region - location = Location.DEFAULT - else: - # Boto uses symbolic names for locations but region strings will - # actually work fine for everything except us-east-1 (US Standard) - location = region - - if module.params.get('object'): - obj = os.path.expanduser(module.params['object']) - - # allow eucarc environment variables to be used if ansible vars aren't set - if not s3_url and 'S3_URL' in os.environ: - s3_url = os.environ['S3_URL'] - - # Look at s3_url and tweak connection settings - # if connecting to Walrus or fakes3 - if is_fakes3(s3_url): - try: - fakes3 = urlparse.urlparse(s3_url) - from boto.s3.connection import OrdinaryCallingFormat - s3 = boto.connect_s3( - aws_access_key, - aws_secret_key, - is_secure=False, - host=fakes3.hostname, - port=fakes3.port, - calling_format=OrdinaryCallingFormat()) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - elif is_walrus(s3_url): - try: - walrus = urlparse.urlparse(s3_url).hostname - s3 = boto.connect_walrus(walrus, aws_access_key, aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - else: - try: - s3 = boto.connect_s3(aws_access_key, aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - - # If our mode is a GET operation (download), go through the procedure as appropriate ... - if mode == 'get': - - # First, we check to see if the bucket exists; we get "bucket" returned. - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is False: - module.fail_json(msg="Target bucket cannot be found", failed=True) - - # Next, we check to see if the key in the bucket exists; if it does, its md5sum/etag is compared against the local file below. - keyrtn = key_check(module, s3, bucket, obj) - if keyrtn is False: - module.fail_json(msg="Target key cannot be found", failed=True) - - # If the destination path doesn't exist, there is no need for the md5sum/etag check, so just download. - pathrtn = path_check(dest) - if pathrtn is False: - download_s3file(module, s3, bucket, obj, dest) - - # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists. - if pathrtn is True: - md5_remote = keysum(module, s3, bucket, obj) - md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest() - if md5_local == md5_remote: - sum_matches = True - if overwrite is True: - download_s3file(module, s3, bucket, obj, dest) - else: - module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False) - else: - sum_matches = False - if overwrite is True: - download_s3file(module, s3, bucket, obj, dest) - else: - module.fail_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True) - - # First, if the checksums match and overwrite is not enabled, we exit with a helpful message. - if sum_matches is True and overwrite is False: - module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False) - - # At this point explicitly define the overwrite condition. - if sum_matches is True and pathrtn is True and overwrite is True: - download_s3file(module, s3, bucket, obj, dest) - - # If sum does not match but the destination exists, we - - # if our mode is a PUT operation (upload), go through the procedure as appropriate ...
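# Decision sketch for the PUT path (a summary of the branches below, not
# extra logic):
#   local md5 == remote etag, overwrite=true  -> upload again
#   local md5 == remote etag, overwrite=false -> return download URL, unchanged
#   local md5 != remote etag, overwrite=true  -> upload new content
#   local md5 != remote etag, overwrite=false -> exit with a checksum warning
#   bucket missing, local src exists          -> create bucket, then upload
#   bucket exists, key missing                -> upload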
- if mode == 'put': - - # Use this snippet to debug through conditionals: -# module.exit_json(msg="Bucket return %s"%bucketrtn) -# sys.exit(0) - - # Let's check the src path. - pathrtn = path_check(src) - if pathrtn is False: - module.fail_json(msg="Local object for PUT does not exist", failed=True) - - # Let's check to see if bucket exists to get ground truth. - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is True: - keyrtn = key_check(module, s3, bucket, obj) - - # Let's check key state. Does it exist and if it does, compute the etag md5sum. - if bucketrtn is True and keyrtn is True: - md5_remote = keysum(module, s3, bucket, obj) - md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest() - if md5_local == md5_remote: - sum_matches = True - if overwrite is True: - upload_s3file(module, s3, bucket, obj, src, expiry, metadata) - else: - get_download_url(module, s3, bucket, obj, expiry, changed=False) - else: - sum_matches = False - if overwrite is True: - upload_s3file(module, s3, bucket, obj, src, expiry, metadata) - else: - module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True) - - # If neither exist (based on bucket existence), we can create both. - if bucketrtn is False and pathrtn is True: - create_bucket(module, s3, bucket, location) - upload_s3file(module, s3, bucket, obj, src, expiry, metadata) - - # If bucket exists but key doesn't, just upload. - if bucketrtn is True and pathrtn is True and keyrtn is False: - upload_s3file(module, s3, bucket, obj, src, expiry, metadata) - - # Support for deleting an object if we have both params. - if mode == 'delete': - if bucket: - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is True: - deletertn = delete_bucket(module, s3, bucket) - if deletertn is True: - module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=True) - else: - module.fail_json(msg="Bucket does not exist.", changed=False) - else: - module.fail_json(msg="Bucket parameter is required.", failed=True) - - # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now. - # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS. - if mode == 'create': - if bucket and not obj: - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is True: - module.exit_json(msg="Bucket already exists.", changed=False) - else: - module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location)) - if bucket and obj: - bucketrtn = bucket_check(module, s3, bucket) - if obj.endswith('/'): - dirobj = obj - else: - dirobj = obj + "/" - if bucketrtn is True: - keyrtn = key_check(module, s3, bucket, dirobj) - if keyrtn is True: - module.exit_json(msg="Bucket %s and key %s already exist."% (bucket, obj), changed=False) - else: - create_dirkey(module, s3, bucket, dirobj) - if bucketrtn is False: - created = create_bucket(module, s3, bucket, location) - create_dirkey(module, s3, bucket, dirobj) - - # Support for grabbing the time-expired URL for an object in S3/Walrus.
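# geturl returns the result of boto's key.generate_url(expiry): a pre-signed
# URL that stops working after `expiry` seconds (600 by default). Shape is
# illustrative only; bucket, key, and query values are placeholders:
#   https://mybucket.s3.amazonaws.com/my/key.txt?AWSAccessKeyId=...&Expires=1417000000&Signature=...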
- if mode == 'geturl': - if bucket and obj: - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is False: - module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True) - else: - keyrtn = key_check(module, s3, bucket, obj) - if keyrtn is True: - get_download_url(module, s3, bucket, obj, expiry) - else: - module.fail_json(msg="Key %s does not exist."%obj, failed=True) - else: - module.fail_json(msg="Bucket and Object parameters must be set", failed=True) - - if mode == 'getstr': - if bucket and obj: - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is False: - module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True) - else: - keyrtn = key_check(module, s3, bucket, obj) - if keyrtn is True: - download_s3str(module, s3, bucket, obj) - else: - module.fail_json(msg="Key %s does not exist."%obj, failed=True) - - module.exit_json(failed=False) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/virt b/library/cloud/virt deleted file mode 100644 index f1d36fc1964..00000000000 --- a/library/cloud/virt +++ /dev/null @@ -1,493 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Virt management features - -Copyright 2007, 2012 Red Hat, Inc -Michael DeHaan -Seth Vidal - -This software may be freely redistributed under the terms of the GNU -general public license. - -You should have received a copy of the GNU General Public License -along with this program. If not, see <http://www.gnu.org/licenses/>. -""" - -DOCUMENTATION = ''' ---- -module: virt -short_description: Manages virtual machines supported by libvirt -description: - - Manages virtual machines supported by I(libvirt). -version_added: "0.2" -options: - name: - description: - - name of the guest VM being managed. Note that VM must be previously - defined with xml. - required: true - default: null - aliases: [] - state: - description: - - Note that there may be some lag for state requests like C(shutdown) - since these refer only to VM states. After starting a guest, it may not - be immediately accessible. - required: false - choices: [ "running", "shutdown", "destroyed", "paused" ] - default: null - command: - description: - - In addition to state management, various non-idempotent commands are available.
See examples. - required: false - choices: ["create","status", "start", "stop", "pause", "unpause", - "shutdown", "undefine", "destroy", "get_xml", "autostart", - "freemem", "list_vms", "info", "nodeinfo", "virttype", "define"] - uri: - description: - - libvirt connection uri - required: false - default: qemu:///system - xml: - description: - - XML document used with the define command - required: false - default: null -requirements: [ "libvirt" ] -author: Michael DeHaan, Seth Vidal -''' - -EXAMPLES = ''' -# a playbook task line: -- virt: name=alpha state=running - -# /usr/bin/ansible invocations -ansible host -m virt -a "name=alpha command=status" -ansible host -m virt -a "name=alpha command=get_xml" -ansible host -m virt -a "name=alpha command=create uri=lxc:///" - -# a playbook example of defining and launching an LXC guest -tasks: - - name: define vm - virt: name=foo - command=define - xml="{{ lookup('template', 'container-template.xml.j2') }}" - uri=lxc:/// - - name: start vm - virt: name=foo state=running uri=lxc:/// -''' - -VIRT_FAILED = 1 -VIRT_SUCCESS = 0 -VIRT_UNAVAILABLE=2 - -import sys - -try: - import libvirt -except ImportError: - print "failed=True msg='libvirt python module unavailable'" - sys.exit(1) - -ALL_COMMANDS = [] -VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause', - 'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart', 'define'] -HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype'] -ALL_COMMANDS.extend(VM_COMMANDS) -ALL_COMMANDS.extend(HOST_COMMANDS) - -VIRT_STATE_NAME_MAP = { - 0 : "running", - 1 : "running", - 2 : "running", - 3 : "paused", - 4 : "shutdown", - 5 : "shutdown", - 6 : "crashed" -} - -class VMNotFound(Exception): - pass - -class LibvirtConnection(object): - - def __init__(self, uri, module): - - self.module = module - - cmd = "uname -r" - rc, stdout, stderr = self.module.run_command(cmd) - - if "xen" in stdout: - conn = libvirt.open(None) - else: - conn = libvirt.open(uri) - - if not conn: - raise Exception("hypervisor connection failure") - - self.conn = conn - - def find_vm(self, vmid): - """ - Extra bonus feature: vmid = -1 returns a list of everything - """ - conn = self.conn - - vms = [] - - # this block of code borrowed from virt-manager: - # get working domain's name - ids = conn.listDomainsID() - for id in ids: - vm = conn.lookupByID(id) - vms.append(vm) - # get defined domain - names = conn.listDefinedDomains() - for name in names: - vm = conn.lookupByName(name) - vms.append(vm) - - if vmid == -1: - return vms - - for vm in vms: - if vm.name() == vmid: - return vm - - raise VMNotFound("virtual machine %s not found" % vmid) - - def shutdown(self, vmid): - return self.find_vm(vmid).shutdown() - - def pause(self, vmid): - return self.suspend(vmid) - - def unpause(self, vmid): - return self.resume(vmid) - - def suspend(self, vmid): - return self.find_vm(vmid).suspend() - - def resume(self, vmid): - return self.find_vm(vmid).resume() - - def create(self, vmid): - return self.find_vm(vmid).create() - - def destroy(self, vmid): - return self.find_vm(vmid).destroy() - - def undefine(self, vmid): - return self.find_vm(vmid).undefine() - - def get_status2(self, vm): - state = vm.info()[0] - return VIRT_STATE_NAME_MAP.get(state,"unknown") - - def get_status(self, vmid): - state = self.find_vm(vmid).info()[0] - return VIRT_STATE_NAME_MAP.get(state,"unknown") - - def nodeinfo(self): - return self.conn.getInfo() - - def get_type(self): - return self.conn.getType() - - def get_xml(self, vmid):
- vm = self.conn.lookupByName(vmid) - return vm.XMLDesc(0) - - def get_maxVcpus(self, vmid): - vm = self.conn.lookupByName(vmid) - return vm.maxVcpus() - - def get_maxMemory(self, vmid): - vm = self.conn.lookupByName(vmid) - return vm.maxMemory() - - def getFreeMemory(self): - return self.conn.getFreeMemory() - - def get_autostart(self, vmid): - vm = self.conn.lookupByName(vmid) - return vm.autostart() - - def set_autostart(self, vmid, val): - vm = self.conn.lookupByName(vmid) - return vm.setAutostart(val) - - def define_from_xml(self, xml): - return self.conn.defineXML(xml) - - -class Virt(object): - - def __init__(self, uri, module): - self.module = module - self.uri = uri - - def __get_conn(self): - self.conn = LibvirtConnection(self.uri, self.module) - return self.conn - - def get_vm(self, vmid): - self.__get_conn() - return self.conn.find_vm(vmid) - - def state(self): - vms = self.list_vms() - state = [] - for vm in vms: - state_blurb = self.conn.get_status(vm) - state.append("%s %s" % (vm,state_blurb)) - return state - - def info(self): - vms = self.list_vms() - info = dict() - for vm in vms: - data = self.conn.find_vm(vm).info() - # libvirt returns maxMem, memory, and cpuTime as long()'s, which - # xmlrpclib tries to convert to regular int's during serialization. - # This throws exceptions, so convert them to strings here and - # assume the other end of the xmlrpc connection can figure things - # out or doesn't care. - info[vm] = { - "state" : VIRT_STATE_NAME_MAP.get(data[0],"unknown"), - "maxMem" : str(data[1]), - "memory" : str(data[2]), - "nrVirtCpu" : data[3], - "cpuTime" : str(data[4]), - } - info[vm]["autostart"] = self.conn.get_autostart(vm) - - return info - - def nodeinfo(self): - self.__get_conn() - info = dict() - data = self.conn.nodeinfo() - info = { - "cpumodel" : str(data[0]), - "phymemory" : str(data[1]), - "cpus" : str(data[2]), - "cpumhz" : str(data[3]), - "numanodes" : str(data[4]), - "sockets" : str(data[5]), - "cpucores" : str(data[6]), - "cputhreads" : str(data[7]) - } - return info - - def list_vms(self, state=None): - self.conn = self.__get_conn() - vms = self.conn.find_vm(-1) - results = [] - for x in vms: - try: - if state: - vmstate = self.conn.get_status2(x) - if vmstate == state: - results.append(x.name()) - else: - results.append(x.name()) - except: - pass - return results - - def virttype(self): - return self.__get_conn().get_type() - - def autostart(self, vmid): - self.conn = self.__get_conn() - return self.conn.set_autostart(vmid, True) - - def freemem(self): - self.conn = self.__get_conn() - return self.conn.getFreeMemory() - - def shutdown(self, vmid): - """ Make the machine with the given vmid stop running. Whatever that takes. """ - self.__get_conn() - self.conn.shutdown(vmid) - return 0 - - - def pause(self, vmid): - """ Pause the machine with the given vmid. """ - - self.__get_conn() - return self.conn.suspend(vmid) - - def unpause(self, vmid): - """ Unpause the machine with the given vmid. """ - - self.__get_conn() - return self.conn.resume(vmid) - - def create(self, vmid): - """ Start the machine via the given vmid """ - - self.__get_conn() - return self.conn.create(vmid) - - def start(self, vmid): - """ Start the machine via the given id/name """ - - self.__get_conn() - return self.conn.create(vmid) - - def destroy(self, vmid): - """ Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down. 
""" - self.__get_conn() - return self.conn.destroy(vmid) - - def undefine(self, vmid): - """ Stop a domain, and then wipe it from the face of the earth. (delete disk/config file) """ - - self.__get_conn() - return self.conn.undefine(vmid) - - def status(self, vmid): - """ - Return a state suitable for server consumption. Aka, codes.py values, not XM output. - """ - self.__get_conn() - return self.conn.get_status(vmid) - - def get_xml(self, vmid): - """ - Receive a Vm id as input - Return an xml describing vm config returned by a libvirt call - """ - - self.__get_conn() - return self.conn.get_xml(vmid) - - def get_maxVcpus(self, vmid): - """ - Gets the max number of VCPUs on a guest - """ - - self.__get_conn() - return self.conn.get_maxVcpus(vmid) - - def get_max_memory(self, vmid): - """ - Gets the max memory on a guest - """ - - self.__get_conn() - return self.conn.get_MaxMemory(vmid) - - def define(self, xml): - """ - Define a guest with the given xml - """ - self.__get_conn() - return self.conn.define_from_xml(xml) - -def core(module): - - state = module.params.get('state', None) - guest = module.params.get('name', None) - command = module.params.get('command', None) - uri = module.params.get('uri', None) - xml = module.params.get('xml', None) - - v = Virt(uri, module) - res = {} - - if state and command=='list_vms': - res = v.list_vms(state=state) - if type(res) != dict: - res = { command: res } - return VIRT_SUCCESS, res - - if state: - if not guest: - module.fail_json(msg = "state change requires a guest specified") - - res['changed'] = False - if state == 'running': - if v.status(guest) is 'paused': - res['changed'] = True - res['msg'] = v.unpause(guest) - elif v.status(guest) is not 'running': - res['changed'] = True - res['msg'] = v.start(guest) - elif state == 'shutdown': - if v.status(guest) is not 'shutdown': - res['changed'] = True - res['msg'] = v.shutdown(guest) - elif state == 'destroyed': - if v.status(guest) is not 'shutdown': - res['changed'] = True - res['msg'] = v.destroy(guest) - elif state == 'paused': - if v.status(guest) is 'running': - res['changed'] = True - res['msg'] = v.pause(guest) - else: - module.fail_json(msg="unexpected state") - - return VIRT_SUCCESS, res - - if command: - if command in VM_COMMANDS: - if not guest: - module.fail_json(msg = "%s requires 1 argument: guest" % command) - if command == 'define': - if not xml: - module.fail_json(msg = "define requires xml argument") - try: - v.get_vm(guest) - except VMNotFound: - v.define(xml) - res = {'changed': True, 'created': guest} - return VIRT_SUCCESS, res - res = getattr(v, command)(guest) - if type(res) != dict: - res = { command: res } - return VIRT_SUCCESS, res - - elif hasattr(v, command): - res = getattr(v, command)() - if type(res) != dict: - res = { command: res } - return VIRT_SUCCESS, res - - else: - module.fail_json(msg="Command %s not recognized" % basecmd) - - module.fail_json(msg="expected state or command parameter to be specified") - -def main(): - - module = AnsibleModule(argument_spec=dict( - name = dict(aliases=['guest']), - state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']), - command = dict(choices=ALL_COMMANDS), - uri = dict(default='qemu:///system'), - xml = dict(), - )) - - rc = VIRT_SUCCESS - try: - rc, result = core(module) - except Exception, e: - module.fail_json(msg=str(e)) - - if rc != 0: # something went wrong emit the msg - module.fail_json(rc=rc, msg=result) - else: - module.exit_json(**result) - - -# import module snippets -from 
ansible.module_utils.basic import * -main() diff --git a/library/cloud/vsphere_guest b/library/cloud/vsphere_guest deleted file mode 100644 index e3dcc4d28ea..00000000000 --- a/library/cloud/vsphere_guest +++ /dev/null @@ -1,1225 +0,0 @@ -#!/usr/bin/python - -# -*- coding: utf-8 -*- - -# TODO: -# Ability to set CPU/Memory reservations - -try: - import json -except ImportError: - import simplejson as json - -HAS_PYSPHERE = False -try: - from pysphere import VIServer, VIProperty, MORTypes - from pysphere.resources import VimService_services as VI - from pysphere.vi_task import VITask - from pysphere import VIException, VIApiException, FaultTypes - HAS_PYSPHERE = True -except ImportError: - pass - -DOCUMENTATION = ''' ---- -module: vsphere_guest -short_description: Create/delete/manage a guest VM through VMware vSphere. -description: - - Create/delete/reconfigure a guest VM through VMware vSphere. This module has a dependency on pysphere >= 1.7 -version_added: "1.6" -options: - vcenter_hostname: - description: - - The hostname of the vcenter server the module will connect to, to create the guest. - required: true - default: null - aliases: [] - guest: - description: - - The virtual server name you wish to manage. - required: true - user: - description: - - Username to connect to vcenter as. - required: true - default: null - password: - description: - - Password of the user to connect to vcenter as. - required: true - default: null - resource_pool: - description: - - The name of the resource_pool to create the VM in. - required: false - default: None - cluster: - description: - - The name of the cluster to create the VM in. By default this is derived from the host you tell the module to build the guest on. - required: false - default: None - esxi: - description: - - Dictionary which includes datacenter and hostname on which the VM should be created. For standalone ESXi hosts, ha-datacenter should be used as the datacenter name - required: false - default: null - state: - description: - - Indicate desired state of the VM. - default: present - choices: ['present', 'powered_on', 'absent', 'powered_off', 'restarted', 'reconfigured'] - vm_disk: - description: - - A key, value list of disks and their sizes and which datastore to keep it in. - required: false - default: null - vm_hardware: - description: - - A key, value list of VM config settings. Must include ['memory_mb', 'num_cpus', 'osid', 'scsi']. - required: false - default: null - vm_nic: - description: - - A key, value list of nics, their types and what network to put them on. - required: false - default: null - vm_extra_config: - description: - - A key, value pair of any extra values you want set or changed in the vmx file of the VM. Useful to set advanced options on the VM. - required: false - default: null - vm_hw_version: - description: - - Desired hardware version identifier (for example, "vmx-08" for VMs that need to be managed with vSphere Client). Note that changing hardware version of existing vm is not supported. - required: false - default: null - version_added: "1.7" - vmware_guest_facts: - description: - - Gather facts from vCenter on a particular VM - required: false - default: null - force: - description: - - Boolean. Allows you to run commands which may alter the running state of a guest. Also used to reconfigure and destroy. - default: "no" - choices: [ "yes", "no" ] - -notes: - - This module should run from a system that can access vSphere directly. - Either by using local_action, or using delegate_to.
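# For a standalone ESXi host, the esxi option described above would look
# like this (a sketch; the hostname is a placeholder):
#   esxi:
#     datacenter: ha-datacenter
#     hostname: esx-standalone.mydomain.local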
-author: Richard Hoop -requirements: [ pysphere ] -''' - - -EXAMPLES = ''' -# Create a new VM on an ESX server -# Returns changed = False when the VM already exists -# Returns changed = True and adds ansible_facts from the new VM -# State will set the power status of a guest upon creation. Use powered_on to create and boot. -# Options ['state', 'vm_extra_config', 'vm_disk', 'vm_nic', 'vm_hardware', 'esxi'] are required together - -- vsphere_guest: - vcenter_hostname: vcenter.mydomain.local - username: myuser - password: mypass - guest: newvm001 - state: powered_on - vm_extra_config: - vcpu.hotadd: yes - mem.hotadd: yes - notes: This is a test VM - vm_disk: - disk1: - size_gb: 10 - type: thin - datastore: storage001 - vm_nic: - nic1: - type: vmxnet3 - network: VM Network - network_type: standard - vm_hardware: - memory_mb: 2048 - num_cpus: 2 - osid: centos64Guest - scsi: paravirtual - esxi: - datacenter: MyDatacenter - hostname: esx001.mydomain.local - -# Reconfigure the CPU and Memory on the newly created VM -# Will return the changes made - -- vsphere_guest: - vcenter_hostname: vcenter.mydomain.local - username: myuser - password: mypass - guest: newvm001 - state: reconfigured - vm_extra_config: - vcpu.hotadd: yes - mem.hotadd: yes - notes: This is a test VM - vm_disk: - disk1: - size_gb: 10 - type: thin - datastore: storage001 - vm_nic: - nic1: - type: vmxnet3 - network: VM Network - network_type: standard - vm_hardware: - memory_mb: 4096 - num_cpus: 4 - osid: centos64Guest - scsi: paravirtual - esxi: - datacenter: MyDatacenter - hostname: esx001.mydomain.local - -# Task to gather facts from a vSphere cluster only if the system is a VMWare guest - -- vsphere_guest: - vcenter_hostname: vcenter.mydomain.local - username: myuser - password: mypass - guest: newvm001 - vmware_guest_facts: yes - - -# Typical output of a vsphere_facts run on a guest - -- hw_eth0: - - addresstype: "assigned" - label: "Network adapter 1" - macaddress: "00:22:33:33:44:55" - macaddress_dash: "00-22-33-33-44-55" - summary: "VM Network" - hw_guest_full_name: "newvm001" - hw_guest_id: "rhel6_64Guest" - hw_memtotal_mb: 2048 - hw_name: "centos64Guest" - hw_processor_count: 2 - hw_product_uuid: "ef50bac8-2845-40ff-81d9-675315501dac" - -# Remove a vm from vSphere -# The VM must be powered_off or you need to use force to force a shutdown - -- vsphere_guest: - vcenter_hostname: vcenter.mydomain.local - username: myuser - password: mypass - guest: newvm001 - state: absent - force: yes -''' - -def add_scsi_controller(module, s, config, devices, type="paravirtual", bus_num=0, disk_ctrl_key=1): - # add a scsi controller - scsi_ctrl_spec = config.new_deviceChange() - scsi_ctrl_spec.set_element_operation('add') - - if type == "lsi": - # For RHEL5 - scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass() - elif type == "paravirtual": - # For RHEL6 - scsi_ctrl = VI.ns0.ParaVirtualSCSIController_Def("scsi_ctrl").pyclass() - elif type == "lsi_sas": - scsi_ctrl = VI.ns0.VirtualLsiLogicSASController_Def( - "scsi_ctrl").pyclass() - elif type == "bus_logic": - scsi_ctrl = VI.ns0.VirtualBusLogicController_Def("scsi_ctrl").pyclass() - else: - s.disconnect() - module.fail_json( - msg="Error adding scsi controller to vm spec. No scsi controller" - " type of: %s" % (type)) - - scsi_ctrl.set_element_busNumber(int(bus_num)) - scsi_ctrl.set_element_key(int(disk_ctrl_key)) - scsi_ctrl.set_element_sharedBus("noSharing") - scsi_ctrl_spec.set_element_device(scsi_ctrl) - # Add the scsi controller to the VM spec.
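# (Type-to-class mapping handled above, summarized for reference only:
# lsi -> VirtualLsiLogicController, paravirtual -> ParaVirtualSCSIController,
# lsi_sas -> VirtualLsiLogicSASController, bus_logic -> VirtualBusLogicController.)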
- devices.append(scsi_ctrl_spec) - return disk_ctrl_key - - -def add_disk(module, s, config_target, config, devices, datastore, type="thin", size=200000, disk_ctrl_key=1, disk_number=0, key=0): - # add a vmdk disk - # Verify the datastore exists - datastore_name, ds = find_datastore(module, s, datastore, config_target) - # create a new disk - file based - for the vm - disk_spec = config.new_deviceChange() - disk_spec.set_element_fileOperation("create") - disk_spec.set_element_operation("add") - disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass() - disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def( - "disk_backing").pyclass() - disk_backing.set_element_fileName(datastore_name) - disk_backing.set_element_diskMode("persistent") - if type != "thick": - disk_backing.set_element_thinProvisioned(1) - disk_ctlr.set_element_key(key) - disk_ctlr.set_element_controllerKey(int(disk_ctrl_key)) - disk_ctlr.set_element_unitNumber(int(disk_number)) - disk_ctlr.set_element_backing(disk_backing) - disk_ctlr.set_element_capacityInKB(int(size)) - disk_spec.set_element_device(disk_ctlr) - devices.append(disk_spec) - - -def add_cdrom(module, s, config_target, config, devices, default_devs, type="client", vm_cd_iso_path=None): - # Add a cd-rom - # Make sure the datastore exists. - if vm_cd_iso_path: - iso_location = vm_cd_iso_path.split('/', 1) - datastore, ds = find_datastore( - module, s, iso_location[0], config_target) - iso_path = iso_location[1] - - # find ide controller - ide_ctlr = None - for dev in default_devs: - if dev.typecode.type[1] == "VirtualIDEController": - ide_ctlr = dev - - # add a cdrom based on a physical device - if ide_ctlr: - cd_spec = config.new_deviceChange() - cd_spec.set_element_operation('add') - cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass() - - if type == "iso": - iso = VI.ns0.VirtualCdromIsoBackingInfo_Def("iso").pyclass() - ds_ref = iso.new_datastore(ds) - ds_ref.set_attribute_type(ds.get_attribute_type()) - iso.set_element_datastore(ds_ref) - iso.set_element_fileName("%s %s" % (datastore, iso_path)) - cd_ctrl.set_element_backing(iso) - cd_ctrl.set_element_key(20) - cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key()) - cd_ctrl.set_element_unitNumber(0) - cd_spec.set_element_device(cd_ctrl) - elif type == "client": - client = VI.ns0.VirtualCdromRemoteAtapiBackingInfo_Def( - "client").pyclass() - client.set_element_deviceName("") - cd_ctrl.set_element_backing(client) - cd_ctrl.set_element_key(20) - cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key()) - cd_ctrl.set_element_unitNumber(0) - cd_spec.set_element_device(cd_ctrl) - else: - s.disconnect() - module.fail_json( - msg="Error adding cdrom of type %s to vm spec. 
" - " cdrom type can either be iso or client" % (type)) - - devices.append(cd_spec) - - -def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name="VM Network", network_type="standard"): - # add a NIC - # Different network card types are: "VirtualE1000", - # "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet", "VirtualNmxnet2", - # "VirtualVmxnet3" - nic_spec = config.new_deviceChange() - nic_spec.set_element_operation("add") - - if nic_type == "e1000": - nic_ctlr = VI.ns0.VirtualE1000_Def("nic_ctlr").pyclass() - elif nic_type == "e1000e": - nic_ctlr = VI.ns0.VirtualE1000e_Def("nic_ctlr").pyclass() - elif nic_type == "pcnet32": - nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass() - elif nic_type == "vmxnet": - nic_ctlr = VI.ns0.VirtualVmxnet_Def("nic_ctlr").pyclass() - elif nic_type == "vmxnet2": - nic_ctlr = VI.ns0.VirtualVmxnet2_Def("nic_ctlr").pyclass() - elif nic_type == "vmxnet3": - nic_ctlr = VI.ns0.VirtualVmxnet3_Def("nic_ctlr").pyclass() - else: - s.disconnect() - module.fail_json( - msg="Error adding nic to vm spec. No nic type of: %s" % - (nic_type)) - - if network_type == "standard": - nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def( - "nic_backing").pyclass() - nic_backing.set_element_deviceName(network_name) - elif network_type == "dvs": - # Get the portgroup key - portgroupKey = find_portgroup_key(module, s, nfmor, network_name) - # Get the dvswitch uuid - dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, portgroupKey) - - nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def( - "nic_backing_port").pyclass() - nic_backing_port.set_element_switchUuid(dvswitch_uuid) - nic_backing_port.set_element_portgroupKey(portgroupKey) - - nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def( - "nic_backing").pyclass() - nic_backing.set_element_port(nic_backing_port) - else: - s.disconnect() - module.fail_json( - msg="Error adding nic backing to vm spec. No network type of:" - " %s" % (network_type)) - - nic_ctlr.set_element_addressType("generated") - nic_ctlr.set_element_backing(nic_backing) - nic_ctlr.set_element_key(4) - nic_spec.set_element_device(nic_ctlr) - devices.append(nic_spec) - - -def find_datastore(module, s, datastore, config_target): - # Verify the datastore exists and put it in brackets if it does. - ds = None - for d in config_target.Datastore: - if (d.Datastore.Accessible and - (datastore and d.Datastore.Name == datastore) - or (not datastore)): - ds = d.Datastore.Datastore - datastore = d.Datastore.Name - break - if not ds: - s.disconnect() - module.fail_json(msg="Datastore: %s does not appear to exist" % - (datastore)) - - datastore_name = "[%s]" % datastore - return datastore_name, ds - - -def find_portgroup_key(module, s, nfmor, network_name): - # Find a portgroups key given the portgroup name. - - # Grab all the distributed virtual portgroup's names and key's. - dvpg_mors = s._retrieve_properties_traversal( - property_names=['name', 'key'], - from_node=nfmor, obj_type='DistributedVirtualPortgroup') - - # Get the correct portgroup managed object. - dvpg_mor = None - for dvpg in dvpg_mors: - if dvpg_mor: - break - for p in dvpg.PropSet: - if p.Name == "name" and p.Val == network_name: - dvpg_mor = dvpg - if dvpg_mor: - break - - # If dvpg_mor is empty we didn't find the named portgroup. 
- if dvpg_mor is None: - s.disconnect() - module.fail_json( - msg="Could not find the distributed virtual portgroup named" - " %s" % network_name) - - # Get the portgroup key - portgroupKey = None - for p in dvpg_mor.PropSet: - if p.Name == "key": - portgroupKey = p.Val - - return portgroupKey - - -def find_dvswitch_uuid(module, s, nfmor, portgroupKey): - # Find a dvswitch's uuid given a portgroup key. - # Function searches all dvswitches in the datacenter to find the switch - # that has the portgroup key. - - # Grab the dvswitch uuid and portgroup properties - dvswitch_mors = s._retrieve_properties_traversal( - property_names=['uuid', 'portgroup'], - from_node=nfmor, obj_type='DistributedVirtualSwitch') - - dvswitch_mor = None - # Get the dvswitches managed object - for dvswitch in dvswitch_mors: - if dvswitch_mor: - break - for p in dvswitch.PropSet: - if p.Name == "portgroup": - pg_mors = p.Val.ManagedObjectReference - for pg_mor in pg_mors: - if dvswitch_mor: - break - key_mor = s._get_object_properties( - pg_mor, property_names=['key']) - for key in key_mor.PropSet: - if key.Val == portgroupKey: - dvswitch_mor = dvswitch - - # Get the switches uuid - dvswitch_uuid = None - for p in dvswitch_mor.PropSet: - if p.Name == "uuid": - dvswitch_uuid = p.Val - - return dvswitch_uuid - - -def spec_singleton(spec, request, vm): - - if not spec: - _this = request.new__this(vm._mor) - _this.set_attribute_type(vm._mor.get_attribute_type()) - request.set_element__this(_this) - spec = request.new_spec() - return spec - - -def vmdisk_id(vm, current_datastore_name): - id_list = [] - for vm_disk in vm._disks: - if current_datastore_name in vm_disk['descriptor']: - id_list.append(vm_disk['device']['key']) - return id_list - - -def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force): - spec = None - changed = False - changes = {} - request = VI.ReconfigVM_TaskRequestMsg() - shutdown = False - - memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled) - cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled) - cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled) - - # Change Memory - if vm_hardware['memory_mb']: - - if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB: - spec = spec_singleton(spec, request, vm) - - if vm.is_powered_on(): - if force: - # No hot add but force - if not memoryHotAddEnabled: - shutdown = True - elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB: - shutdown = True - else: - # Fail on no hot add and no force - if not memoryHotAddEnabled: - module.fail_json( - msg="memoryHotAdd is not enabled. force is " - "required for shutdown") - - # Fail on no force and memory shrink - elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB: - module.fail_json( - msg="Cannot lower memory on a live VM. 
force is " - "required for shutdown") - - # set the new RAM size - spec.set_element_memoryMB(int(vm_hardware['memory_mb'])) - changes['memory'] = vm_hardware['memory_mb'] - - # ====( Config Memory )====# - if vm_hardware['num_cpus']: - if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU: - spec = spec_singleton(spec, request, vm) - - if vm.is_powered_on(): - if force: - # No hot add but force - if not cpuHotAddEnabled: - shutdown = True - elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU: - if not cpuHotRemoveEnabled: - shutdown = True - else: - # Fail on no hot add and no force - if not cpuHotAddEnabled: - module.fail_json( - msg="cpuHotAdd is not enabled. force is " - "required for shutdown") - - # Fail on no force and cpu shrink without hot remove - elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU: - if not cpuHotRemoveEnabled: - module.fail_json( - msg="Cannot lower CPU on a live VM without " - "cpuHotRemove. force is required for shutdown") - - spec.set_element_numCPUs(int(vm_hardware['num_cpus'])) - - changes['cpu'] = vm_hardware['num_cpus'] - - if len(changes): - - if shutdown and vm.is_powered_on(): - try: - vm.power_off(sync_run=True) - vm.get_status() - - except Exception, e: - module.fail_json( - msg='Failed to shutdown vm %s: %s' % (guest, e) - ) - - request.set_element_spec(spec) - ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval - - # Wait for the task to finish - task = VITask(ret, vsphere_client) - status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) - if status == task.STATE_SUCCESS: - changed = True - elif status == task.STATE_ERROR: - module.fail_json( - msg="Error reconfiguring vm: %s" % task.get_error_message()) - - if vm.is_powered_off(): - try: - vm.power_on(sync_run=True) - except Exception, e: - module.fail_json( - msg='Failed to power on vm %s : %s' % (guest, e) - ) - - vsphere_client.disconnect() - if changed: - module.exit_json(changed=True, changes=changes) - - module.exit_json(changed=False) - - -def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, vm_hw_version, state): - - datacenter = esxi['datacenter'] - esxi_hostname = esxi['hostname'] - # Datacenter managed object reference - dclist = [k for k, - v in vsphere_client.get_datacenters().items() if v == datacenter] - if dclist: - dcmor=dclist[0] - else: - vsphere_client.disconnect() - module.fail_json(msg="Cannot find datacenter named: %s" % datacenter) - - dcprops = VIProperty(vsphere_client, dcmor) - - # hostFolder managed reference - hfmor = dcprops.hostFolder._obj - - # virtualmachineFolder managed object reference - vmfmor = dcprops.vmFolder._obj - - # networkFolder managed object reference - nfmor = dcprops.networkFolder._obj - - # Grab the computerResource name and host properties - crmors = vsphere_client._retrieve_properties_traversal( - property_names=['name', 'host'], - from_node=hfmor, - obj_type='ComputeResource') - - # Grab the host managed object reference of the esxi_hostname - try: - hostmor = [k for k, - v in vsphere_client.get_hosts().items() if v == esxi_hostname][0] - except IndexError, e: - vsphere_client.disconnect() - module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname) - - # Grab the computerResource managed object reference of the host we are - # creating the VM on. 
- crmor = None - for cr in crmors: - if crmor: - break - for p in cr.PropSet: - if p.Name == "host": - for h in p.Val.get_element_ManagedObjectReference(): - if h == hostmor: - crmor = cr.Obj - break - if crmor: - break - crprops = VIProperty(vsphere_client, crmor) - - # Get resource pool managed reference - # Requires that a cluster name be specified. - if resource_pool: - try: - cluster = [k for k, - v in vsphere_client.get_clusters().items() if v == cluster_name][0] - except IndexError, e: - vsphere_client.disconnect() - module.fail_json(msg="Cannot find Cluster named: %s" % - cluster_name) - - try: - rpmor = [k for k, v in vsphere_client.get_resource_pools( - from_mor=cluster).items() - if v == resource_pool][0] - except IndexError, e: - vsphere_client.disconnect() - module.fail_json(msg="Cannot find Resource Pool named: %s" % - resource_pool) - - else: - rpmor = crprops.resourcePool._obj - - # CREATE VM CONFIGURATION - # get config target - request = VI.QueryConfigTargetRequestMsg() - _this = request.new__this(crprops.environmentBrowser._obj) - _this.set_attribute_type( - crprops.environmentBrowser._obj.get_attribute_type()) - request.set_element__this(_this) - h = request.new_host(hostmor) - h.set_attribute_type(hostmor.get_attribute_type()) - request.set_element_host(h) - config_target = vsphere_client._proxy.QueryConfigTarget(request)._returnval - - # get default devices - request = VI.QueryConfigOptionRequestMsg() - _this = request.new__this(crprops.environmentBrowser._obj) - _this.set_attribute_type( - crprops.environmentBrowser._obj.get_attribute_type()) - request.set_element__this(_this) - h = request.new_host(hostmor) - h.set_attribute_type(hostmor.get_attribute_type()) - request.set_element_host(h) - config_option = vsphere_client._proxy.QueryConfigOption(request)._returnval - default_devs = config_option.DefaultDevice - - # add parameters to the create vm task - create_vm_request = VI.CreateVM_TaskRequestMsg() - config = create_vm_request.new_config() - if vm_hw_version: - config.set_element_version(vm_hw_version) - vmfiles = config.new_files() - datastore_name, ds = find_datastore( - module, vsphere_client, vm_disk['disk1']['datastore'], config_target) - vmfiles.set_element_vmPathName(datastore_name) - config.set_element_files(vmfiles) - config.set_element_name(guest) - if 'notes' in vm_extra_config: - config.set_element_annotation(vm_extra_config['notes']) - config.set_element_memoryMB(int(vm_hardware['memory_mb'])) - config.set_element_numCPUs(int(vm_hardware['num_cpus'])) - config.set_element_guestId(vm_hardware['osid']) - devices = [] - - # Attach all the hardware we want to the VM spec. - # Add a scsi controller to the VM spec. - disk_ctrl_key = add_scsi_controller( - module, vsphere_client, config, devices, vm_hardware['scsi']) - if vm_disk: - disk_num = 0 - disk_key = 0 - for disk in sorted(vm_disk.iterkeys()): - try: - datastore = vm_disk[disk]['datastore'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. datastore needs to be" - " specified." % disk) - try: - disksize = int(vm_disk[disk]['size_gb']) - # Convert the disk size to kilobytes - disksize = disksize * 1024 * 1024 - except (KeyError, ValueError): - vsphere_client.disconnect() - module.fail_json(msg="Error on %s definition. size needs to be specified as an integer." % disk) - try: - disktype = vm_disk[disk]['type'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. type needs to be" - " specified."
% disk) - # Add the disk to the VM spec. - add_disk( - module, vsphere_client, config_target, config, - devices, datastore, disktype, disksize, disk_ctrl_key, - disk_num, disk_key) - disk_num = disk_num + 1 - disk_key = disk_key + 1 - if 'vm_cdrom' in vm_hardware: - cdrom_iso_path = None - cdrom_type = None - try: - cdrom_type = vm_hardware['vm_cdrom']['type'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. cdrom type needs to be" - " specified." % vm_hardware['vm_cdrom']) - if cdrom_type == 'iso': - try: - cdrom_iso_path = vm_hardware['vm_cdrom']['iso_path'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. cdrom iso_path needs" - " to be specified." % vm_hardware['vm_cdrom']) - # Add a CD-ROM device to the VM. - add_cdrom(module, vsphere_client, config_target, config, devices, - default_devs, cdrom_type, cdrom_iso_path) - if vm_nic: - for nic in sorted(vm_nic.iterkeys()): - try: - nictype = vm_nic[nic]['type'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. type needs to be " - " specified." % nic) - try: - network = vm_nic[nic]['network'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. network needs to be " - " specified." % nic) - try: - network_type = vm_nic[nic]['network_type'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. network_type needs to be " - " specified." % nic) - # Add the nic to the VM spec. - add_nic(module, vsphere_client, nfmor, config, devices, - nictype, network, network_type) - - config.set_element_deviceChange(devices) - create_vm_request.set_element_config(config) - folder_mor = create_vm_request.new__this(vmfmor) - folder_mor.set_attribute_type(vmfmor.get_attribute_type()) - create_vm_request.set_element__this(folder_mor) - rp_mor = create_vm_request.new_pool(rpmor) - rp_mor.set_attribute_type(rpmor.get_attribute_type()) - create_vm_request.set_element_pool(rp_mor) - host_mor = create_vm_request.new_host(hostmor) - host_mor.set_attribute_type(hostmor.get_attribute_type()) - create_vm_request.set_element_host(host_mor) - - # CREATE THE VM - taskmor = vsphere_client._proxy.CreateVM_Task(create_vm_request)._returnval - task = VITask(taskmor, vsphere_client) - task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) - if task.get_state() == task.STATE_ERROR: - vsphere_client.disconnect() - module.fail_json(msg="Error creating vm: %s" % - task.get_error_message()) - else: - # We always need to get the vm because we are going to gather facts - vm = vsphere_client.get_vm_by_name(guest) - - # VM was created. If there are any extra config options specified, set - # them here, disconnect from vcenter, then exit.
- if vm_extra_config: - vm.set_extra_config(vm_extra_config) - - # Power on the VM if it was requested - power_state(vm, state, True) - - vsphere_client.disconnect() - module.exit_json( - ansible_facts=gather_facts(vm), - changed=True, - changes="Created VM %s" % guest) - - -def delete_vm(vsphere_client, module, guest, vm, force): - try: - - if vm.is_powered_on(): - if force: - try: - vm.power_off(sync_run=True) - vm.get_status() - - except Exception, e: - module.fail_json( - msg='Failed to shutdown vm %s: %s' % (guest, e)) - else: - module.fail_json( - msg='You must either shut the vm down first or ' - 'use force') - - # Invoke Destroy_Task - request = VI.Destroy_TaskRequestMsg() - _this = request.new__this(vm._mor) - _this.set_attribute_type(vm._mor.get_attribute_type()) - request.set_element__this(_this) - ret = vsphere_client._proxy.Destroy_Task(request)._returnval - task = VITask(ret, vsphere_client) - - # Wait for the task to finish - status = task.wait_for_state( - [task.STATE_SUCCESS, task.STATE_ERROR]) - if status == task.STATE_ERROR: - vsphere_client.disconnect() - module.fail_json(msg="Error removing vm: %s" % - task.get_error_message()) - module.exit_json(changed=True, changes="VM %s deleted" % guest) - except Exception, e: - module.fail_json( - msg='Failed to delete vm %s : %s' % (guest, e)) - - -def power_state(vm, state, force): - """ - Correctly set the power status for a VM determined by the current and - requested states. When force is set, transitional power states are overridden. - """ - power_status = vm.get_status() - - check_status = ' '.join(state.split("_")).upper() - - # Need Force - if not force and power_status in [ - 'SUSPENDED', 'POWERING ON', - 'RESETTING', 'BLOCKED ON MSG' - ]: - - return "VM is in %s power state. Force is required!" % power_status - - # State is already true - if power_status == check_status: - return False - - else: - try: - if state == 'powered_off': - vm.power_off(sync_run=True) - - elif state == 'powered_on': - vm.power_on(sync_run=True) - - elif state == 'restarted': - if power_status in ('POWERED ON', 'POWERING ON', 'RESETTING'): - vm.reset(sync_run=False) - else: - return "Cannot restart VM in the current state %s" \ - % power_status - return True - - except Exception, e: - return e - - return False - - -def gather_facts(vm): - """ - Gather facts for VM directly from vsphere.
- """ - vm.get_properties() - facts = { - 'module_hw': True, - 'hw_name': vm.properties.name, - 'hw_guest_full_name': vm.properties.config.guestFullName, - 'hw_guest_id': vm.properties.config.guestId, - 'hw_product_uuid': vm.properties.config.uuid, - 'hw_processor_count': vm.properties.config.hardware.numCPU, - 'hw_memtotal_mb': vm.properties.config.hardware.memoryMB, - } - - ifidx = 0 - for entry in vm.properties.config.hardware.device: - - if not hasattr(entry, 'macAddress'): - continue - - factname = 'hw_eth' + str(ifidx) - facts[factname] = { - 'addresstype': entry.addressType, - 'label': entry.deviceInfo.label, - 'macaddress': entry.macAddress, - 'macaddress_dash': entry.macAddress.replace(':', '-'), - 'summary': entry.deviceInfo.summary, - } - - ifidx += 1 - - return facts - - -class DefaultVMConfig(object): - - """ - Shallow and deep dict comparison for interfaces - """ - - def __init__(self, check_dict, interface_dict): - self.check_dict, self.interface_dict = check_dict, interface_dict - self.set_current, self.set_past = set( - check_dict.keys()), set(interface_dict.keys()) - self.intersect = self.set_current.intersection(self.set_past) - self.recursive_missing = None - - def shallow_diff(self): - return self.set_past - self.intersect - - def recursive_diff(self): - - if not self.recursive_missing: - self.recursive_missing = [] - for key, value in self.interface_dict.items(): - if isinstance(value, dict): - for k, v in value.items(): - if k in self.check_dict[key]: - if not isinstance(self.check_dict[key][k], v): - try: - if v == int: - self.check_dict[key][k] = int(self.check_dict[key][k]) - elif v == basestring: - self.check_dict[key][k] = str(self.check_dict[key][k]) - else: - raise ValueError - except ValueError: - self.recursive_missing.append((k, v)) - else: - self.recursive_missing.append((k, v)) - - return self.recursive_missing - - -def config_check(name, passed, default, module): - """ - Checks that the dict passed for VM configuration matches the required - interface declared at the top of __main__ - """ - - diff = DefaultVMConfig(passed, default) - if len(diff.shallow_diff()): - module.fail_json( - msg="Missing required key/pair [%s]. 
%s must contain %s" % - (', '.join(diff.shallow_diff()), name, default)) - - if diff.recursive_diff(): - module.fail_json( - msg="Config mismatch for %s on %s" % - (name, diff.recursive_diff())) - - return True - - -def main(): - - vm = None - - proto_vm_hardware = { - 'memory_mb': int, - 'num_cpus': int, - 'scsi': basestring, - 'osid': basestring - } - - proto_vm_disk = { - 'disk1': { - 'datastore': basestring, - 'size_gb': int, - 'type': basestring - } - } - - proto_vm_nic = { - 'nic1': { - 'type': basestring, - 'network': basestring, - 'network_type': basestring - } - } - - proto_esxi = { - 'datacenter': basestring, - 'hostname': basestring - } - - module = AnsibleModule( - argument_spec=dict( - vcenter_hostname=dict(required=True, type='str'), - username=dict(required=True, type='str'), - password=dict(required=True, type='str'), - state=dict( - required=False, - choices=[ - 'powered_on', - 'powered_off', - 'present', - 'absent', - 'restarted', - 'reconfigured' - ], - default='present'), - vmware_guest_facts=dict(required=False, choices=BOOLEANS), - guest=dict(required=True, type='str'), - vm_disk=dict(required=False, type='dict', default={}), - vm_nic=dict(required=False, type='dict', default={}), - vm_hardware=dict(required=False, type='dict', default={}), - vm_extra_config=dict(required=False, type='dict', default={}), - vm_hw_version=dict(required=False, default=None, type='str'), - resource_pool=dict(required=False, default=None, type='str'), - cluster=dict(required=False, default=None, type='str'), - force=dict(required=False, choices=BOOLEANS, default=False), - esxi=dict(required=False, type='dict', default={}), - - - ), - supports_check_mode=False, - mutually_exclusive=[['state', 'vmware_guest_facts']], - required_together=[ - ['state', 'force'], - [ - 'state', - 'vm_disk', - 'vm_nic', - 'vm_hardware', - 'esxi' - ], - ['resource_pool', 'cluster'] - ], - ) - - if not HAS_PYSPHERE: - module.fail_json(msg='pysphere module required') - - vcenter_hostname = module.params['vcenter_hostname'] - username = module.params['username'] - password = module.params['password'] - vmware_guest_facts = module.params['vmware_guest_facts'] - state = module.params['state'] - guest = module.params['guest'] - force = module.params['force'] - vm_disk = module.params['vm_disk'] - vm_nic = module.params['vm_nic'] - vm_hardware = module.params['vm_hardware'] - vm_extra_config = module.params['vm_extra_config'] - vm_hw_version = module.params['vm_hw_version'] - esxi = module.params['esxi'] - resource_pool = module.params['resource_pool'] - cluster = module.params['cluster'] - - # CONNECT TO THE SERVER - viserver = VIServer() - try: - viserver.connect(vcenter_hostname, username, password) - except VIApiException, err: - module.fail_json(msg="Cannot connect to %s: %s" % - (vcenter_hostname, err)) - - # Check if the VM exists before continuing - try: - vm = viserver.get_vm_by_name(guest) - except Exception: - pass - - if vm: - # Run for facts only - if vmware_guest_facts: - try: - module.exit_json(ansible_facts=gather_facts(vm)) - except Exception, e: - module.fail_json( - msg="Fact gather failed with exception %s" % e) - - # Power Changes - elif state in ['powered_on', 'powered_off', 'restarted']: - state_result = power_state(vm, state, force) - - # Failure - if isinstance(state_result, basestring): - module.fail_json(msg=state_result) - else: - module.exit_json(changed=state_result) - - # Just check if there - elif state == 'present': - module.exit_json(changed=False) - - # Fail on reconfig without params 
- elif state == 'reconfigured': - reconfigure_vm( - vsphere_client=viserver, - vm=vm, - module=module, - esxi=esxi, - resource_pool=resource_pool, - cluster_name=cluster, - guest=guest, - vm_extra_config=vm_extra_config, - vm_hardware=vm_hardware, - vm_disk=vm_disk, - vm_nic=vm_nic, - state=state, - force=force - ) - elif state == 'absent': - delete_vm( - vsphere_client=viserver, - module=module, - guest=guest, - vm=vm, - force=force) - - # VM doesn't exist - else: - - # Fail for fact gather task - if vmware_guest_facts: - module.fail_json( - msg="No such VM %s. Fact gathering requires an existing vm" - % guest) - if state in ['restarted', 'reconfigured']: - module.fail_json( - msg="No such VM %s. States [" - "restarted, reconfigured] require an existing VM" % guest) - elif state == 'absent': - module.exit_json(changed=False, msg="vm %s not present" % guest) - - # Create the VM - elif state in ['present', 'powered_off', 'powered_on']: - - # Check the guest_config - config_check("vm_disk", vm_disk, proto_vm_disk, module) - config_check("vm_nic", vm_nic, proto_vm_nic, module) - config_check("vm_hardware", vm_hardware, proto_vm_hardware, module) - config_check("esxi", esxi, proto_esxi, module) - - create_vm( - vsphere_client=viserver, - module=module, - esxi=esxi, - resource_pool=resource_pool, - cluster_name=cluster, - guest=guest, - vm_extra_config=vm_extra_config, - vm_hardware=vm_hardware, - vm_disk=vm_disk, - vm_nic=vm_nic, - vm_hw_version=vm_hw_version, - state=state - ) - - viserver.disconnect() - module.exit_json( - changed=False, - vcenter=vcenter_hostname) - - -# this is magic, see lib/ansible/module_common.py -#<<INCLUDE_ANSIBLE_MODULE_COMMON>> -main() diff --git a/library/commands/command b/library/commands/command deleted file mode 100644 index c1fabd4f9b4..00000000000 --- a/library/commands/command +++ /dev/null @@ -1,275 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import sys -import datetime -import traceback -import re -import shlex -import os - -DOCUMENTATION = ''' ---- -module: command -version_added: historical -short_description: Executes a command on a remote node -description: - - The M(command) module takes the command name followed by a list of space-delimited arguments. - - The given command will be executed on all selected nodes. It will not be - processed through the shell, so variables like C($HOME) and operations - like C("<"), C(">"), C("|"), and C("&") will not work (use the M(shell) - module if you need these features). -options: - free_form: - description: - - the command module takes a free form command to run. There is no parameter actually named 'free form'. See the examples! - required: true - default: null - aliases: [] - creates: - description: - - a filename, when it already exists, this step will B(not) be run.
- required: no - default: null - removes: - description: - - a filename, when it does not exist, this step will B(not) be run. - version_added: "0.8" - required: no - default: null - chdir: - description: - - cd into this directory before running the command - version_added: "0.6" - required: false - default: null - executable: - description: - - change the shell used to execute the command. Should be an absolute path to the executable. - required: false - default: null - version_added: "0.9" - warn: - version_added: "1.8" - description: - - if command warnings are on in ansible.cfg, do not warn about this particular line if set to no/false. - required: false - default: True -notes: - - If you want to run a command through the shell (say you are using C(<), - C(>), C(|), etc), you actually want the M(shell) module instead. The - M(command) module is much more secure as it's not affected by the user's - environment. - - " C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not exist, use this." -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Example from Ansible Playbooks. -- command: /sbin/shutdown -t now - -# Run the command if the specified file does not exist. -- command: /usr/bin/make_database.sh arg1 arg2 creates=/path/to/database - -# You can also use the 'args' form to provide the options. This command -# will change the working directory to somedir/ and will only run when -# /path/to/database doesn't exist. -- command: /usr/bin/make_database.sh arg1 arg2 - args: - chdir: somedir/ - creates: /path/to/database -''' - -# This is a pretty complex regex, which functions as follows: -# -# 1. (^|\s) -# ^ look for a space or the beginning of the line -# 2. (creates|removes|chdir|executable|NO_LOG)= -# ^ look for a valid param, followed by an '=' -# 3. (?P<quote>[\'"])? -# ^ look for an optional quote character, which can either be -# a single or double quote character, and store it for later -# 4. (.*?) -# ^ match everything in a non-greedy manner until... -# 5. (?(quote)(?<!\\)(?P=quote)) - - if len(v) > 1 and (v.startswith('"') and v.endswith('"') or v.startswith("'") and v.endswith("'")): - v = v[1:-1] - if k in ('creates', 'removes', 'chdir', 'executable', 'NO_LOG'): - if k == "chdir": - v = os.path.abspath(os.path.expanduser(v)) - if not (os.path.exists(v) and os.path.isdir(v)): - self.fail_json(rc=258, msg="cannot change to directory '%s': path does not exist" % v) - elif k == "executable": - v = os.path.abspath(os.path.expanduser(v)) - if not (os.path.exists(v)): - self.fail_json(rc=258, msg="cannot use executable '%s': file does not exist" % v) - params[k] = v - # Remove any of the above k=v params from the args string - args = PARAM_REGEX.sub('', args) - params['args'] = args.strip() - - return (params, params['args']) - -main() diff --git a/library/commands/raw b/library/commands/raw deleted file mode 100644 index 87f2b5c4bdc..00000000000 --- a/library/commands/raw +++ /dev/null @@ -1,43 +0,0 @@ -# this is a virtual module that is entirely implemented server side - -DOCUMENTATION = ''' ---- -module: raw -version_added: historical -short_description: Executes a low-down and dirty SSH command -options: - free_form: - description: - - the raw module takes a free form command to run - required: true - executable: - description: - - change the shell used to execute the command. Should be an absolute path to the executable.
- required: false - version_added: "1.0" -description: - - Executes a low-down and dirty SSH command, not going through the module - subsystem. This is useful and should only be done in two cases. The - first case is installing C(python-simplejson) on older (Python 2.4 and - before) hosts that need it as a dependency to run modules, since nearly - all core modules require it. Another is speaking to any devices such as - routers that do not have any Python installed. In any other case, using - the M(shell) or M(command) module is much more appropriate. Arguments - given to M(raw) are run directly through the configured remote shell. - Standard output, error output and return code are returned when - available. There is no change handler support for this module. - - This module does not require python on the remote system, much like - the M(script) module. -notes: - - If you want to execute a command securely and predictably, it may be - better to use the M(command) module instead. Best practices when writing - playbooks will follow the trend of using M(command) unless M(shell) is - explicitly required. When running ad-hoc commands, use your best - judgement. -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Bootstrap a legacy python 2.4 host -- raw: yum -y install python-simplejson -''' diff --git a/library/commands/script b/library/commands/script deleted file mode 100644 index 01a1ae34e71..00000000000 --- a/library/commands/script +++ /dev/null @@ -1,47 +0,0 @@ - -DOCUMENTATION = """ ---- -module: script -version_added: "0.9" -short_description: Runs a local script on a remote node after transferring it -description: - - "The M(script) module takes the script name followed by a list of - space-delimited arguments. " - - "The local script at path will be transferred to the remote node and then executed. " - - "The given script will be processed through the shell environment on the remote node. " - - "This module does not require python on the remote system, much like - the M(raw) module. " -options: - free_form: - description: - - path to the local script file followed by optional arguments. - required: true - default: null - aliases: [] - creates: - description: - - a filename, when it already exists, this step will B(not) be run. - required: no - default: null - version_added: "1.5" - removes: - description: - - a filename, when it does not exist, this step will B(not) be run. - required: no - default: null - version_added: "1.5" -notes: - - It is usually preferable to write Ansible modules rather than to push scripts. Convert your script to an Ansible module for bonus points! -author: Michael DeHaan -""" - -EXAMPLES = ''' -# Example from Ansible Playbooks -- script: /some/local/script.sh --some-arguments 1234 - -# Run a script that creates a file, but only if the file is not yet created -- script: /some/local/create_file.sh --some-arguments 1234 creates=/the/created/file.txt - -# Run a script that removes a file, but only if the file is not yet removed -- script: /some/local/remove_file.sh --some-arguments 1234 removes=/the/removed/file.txt -''' diff --git a/library/commands/shell b/library/commands/shell deleted file mode 100644 index b63a21080ee..00000000000 --- a/library/commands/shell +++ /dev/null @@ -1,78 +0,0 @@ -# There is actually no shell module source; when you use 'shell' in ansible, -# it runs the 'command' module with special arguments and it behaves differently. -# See the command source and the comment "#USE_SHELL".
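-# For illustration only (an editorial aside, not part of the original file):
-# a task written as
-#     - shell: echo $HOME > /tmp/out chdir=/tmp
-# is executed by the command module with special arguments (see the
-# "#USE_SHELL" comment in the command source), which makes the command run
-# through /bin/sh so that redirection and variable expansion work.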
- -DOCUMENTATION = ''' ---- -module: shell -short_description: Execute commands in nodes. -description: - - The M(shell) module takes the command name followed by a list of space-delimited arguments. - It is almost exactly like the M(command) module but runs - the command through a shell (C(/bin/sh)) on the remote node. -version_added: "0.2" -options: - free_form: - description: - - The shell module takes a free form command to run, as a string. There's not an actual - option named "free form". See the examples! - required: true - default: null - creates: - description: - - a filename, when it already exists, this step will B(not) be run. - required: no - default: null - removes: - description: - - a filename, when it does not exist, this step will B(not) be run. - version_added: "0.8" - required: no - default: null - chdir: - description: - - cd into this directory before running the command - required: false - default: null - version_added: "0.6" - executable: - description: - - change the shell used to execute the command. Should be an absolute path to the executable. - required: false - default: null - version_added: "0.9" - warn: - description: - - if command warnings are on in ansible.cfg, do not warn about this particular line if set to no/false. - required: false - default: True - version_added: "1.8" -notes: - - If you want to execute a command securely and predictably, it may be - better to use the M(command) module instead. Best practices when writing - playbooks will follow the trend of using M(command) unless M(shell) is - explicitly required. When running ad-hoc commands, use your best - judgement. - - To sanitize any variables passed to the shell module, you should use - "{{ var | quote }}" instead of just "{{ var }}" to make sure they don't include evil things like semicolons. - -requirements: [ ] -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Execute the command in remote shell; stdout goes to the specified -# file on the remote. -- shell: somescript.sh >> somelog.txt - -# Change the working directory to somedir/ before executing the command. -- shell: somescript.sh >> somelog.txt chdir=somedir/ - -# You can also use the 'args' form to provide the options. This command -# will change the working directory to somedir/ and will only run when -# somedir/somelog.txt doesn't exist. -- shell: somescript.sh >> somelog.txt - args: - chdir: somedir/ - creates: somelog.txt -''' diff --git a/library/database/mongodb_user b/library/database/mongodb_user deleted file mode 100644 index 5d7e0897b68..00000000000 --- a/library/database/mongodb_user +++ /dev/null @@ -1,242 +0,0 @@ -#!/usr/bin/python - -# (c) 2012, Elliott Foster -# Sponsored by Four Kitchens http://fourkitchens.com. -# (c) 2014, Epic Games, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: mongodb_user -short_description: Adds or removes a user from a MongoDB database. 
-description: - - Adds or removes a user from a MongoDB database. -version_added: "1.1" -options: - login_user: - description: - - The username used to authenticate with - required: false - default: null - login_password: - description: - - The password used to authenticate with - required: false - default: null - login_host: - description: - - The host running the database - required: false - default: localhost - login_port: - description: - - The port to connect to - required: false - default: 27017 - replica_set: - version_added: "1.6" - description: - - Replica set to connect to (automatically connects to primary for writes) - required: false - default: null - database: - description: - - The name of the database to add/remove the user from - required: true - user: - description: - - The name of the user to add or remove - required: true - default: null - password: - description: - - The password to use for the user - required: false - default: null - roles: - version_added: "1.3" - description: - - "The database user roles. Valid values are one or more of the following: 'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'dbAdminAnyDatabase'" - - This param requires mongodb 2.4+ and pymongo 2.5+ - required: false - default: "readWrite" - state: - description: - - The database user state - required: false - default: present - choices: [ "present", "absent" ] -notes: - - Requires the pymongo Python package on the remote host, version 2.4.2+. This - can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html -requirements: [ "pymongo" ] -author: Elliott Foster -''' - -EXAMPLES = ''' -# Create 'burgers' database user with name 'bob' and password '12345'. -- mongodb_user: database=burgers name=bob password=12345 state=present - -# Delete 'burgers' database user with name 'bob'. -- mongodb_user: database=burgers name=bob state=absent - -# Define more users with various specific roles (if not defined, no roles are assigned, and the user will be added via the pre MongoDB 2.2 style) -- mongodb_user: database=burgers name=ben password=12345 roles='read' state=present -- mongodb_user: database=burgers name=jim password=12345 roles='readWrite,dbAdmin,userAdmin' state=present -- mongodb_user: database=burgers name=joe password=12345 roles='readWriteAnyDatabase' state=present - -# add a user to database in a replica set, the primary server is automatically discovered and written to -- mongodb_user: database=burgers name=bob replica_set=blecher password=12345 roles='readWriteAnyDatabase' state=present -''' - -import ConfigParser -from distutils.version import LooseVersion -try: - from pymongo.errors import ConnectionFailure - from pymongo.errors import OperationFailure - from pymongo import version as PyMongoVersion - from pymongo import MongoClient -except ImportError: - try: # for older PyMongo 2.2 - from pymongo import Connection as MongoClient - except ImportError: - pymongo_found = False - else: - pymongo_found = True -else: - pymongo_found = True - -# ========================================= -# MongoDB module specific support methods.
-# - -def user_add(module, client, db_name, user, password, roles): - db = client[db_name] - if roles is None: - db.add_user(user, password, False) - else: - try: - db.add_user(user, password, None, roles=roles) - except OperationFailure, e: - err_msg = str(e) - if LooseVersion(PyMongoVersion) <= LooseVersion('2.5'): - err_msg = err_msg + ' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)' - module.fail_json(msg=err_msg) - -def user_remove(client, db_name, user): - db = client[db_name] - db.remove_user(user) - -def load_mongocnf(): - config = ConfigParser.RawConfigParser() - mongocnf = os.path.expanduser('~/.mongodb.cnf') - - try: - config.readfp(open(mongocnf)) - creds = dict( - user=config.get('client', 'user'), - password=config.get('client', 'pass') - ) - except (ConfigParser.NoOptionError, IOError): - return False - - return creds - -# ========================================= -# Module execution. -# - -def main(): - module = AnsibleModule( - argument_spec = dict( - login_user=dict(default=None), - login_password=dict(default=None), - login_host=dict(default='localhost'), - login_port=dict(default='27017'), - replica_set=dict(default=None), - database=dict(required=True, aliases=['db']), - user=dict(required=True, aliases=['name']), - password=dict(aliases=['pass']), - roles=dict(default=None, type='list'), - state=dict(default='present', choices=['absent', 'present']), - ) - ) - - if not pymongo_found: - module.fail_json(msg='the python pymongo module is required') - - login_user = module.params['login_user'] - login_password = module.params['login_password'] - login_host = module.params['login_host'] - login_port = module.params['login_port'] - replica_set = module.params['replica_set'] - db_name = module.params['database'] - user = module.params['user'] - password = module.params['password'] - roles = module.params['roles'] - state = module.params['state'] - - try: - if replica_set: - client = MongoClient(login_host, int(login_port), replicaset=replica_set) - else: - client = MongoClient(login_host, int(login_port)) - - # try to authenticate as a target user to check if it already exists - try: - client[db_name].authenticate(user, password) - if state == 'present': - module.exit_json(changed=False, user=user) - except OperationFailure: - if state == 'absent': - module.exit_json(changed=False, user=user) - - if login_user is None and login_password is None: - mongocnf_creds = load_mongocnf() - if mongocnf_creds is not False: - login_user = mongocnf_creds['user'] - login_password = mongocnf_creds['password'] - elif login_password is None and login_user is not None: - module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') - - if login_user is not None and login_password is not None: - client.admin.authenticate(login_user, login_password) - - except ConnectionFailure, e: - module.fail_json(msg='unable to connect to database: %s' % str(e)) - - if state == 'present': - if password is None: - module.fail_json(msg='password parameter required when adding a user') - - try: - user_add(module, client, db_name, user, password, roles) - except OperationFailure, e: - module.fail_json(msg='Unable to add or update user: %s' % str(e)) - - elif state == 'absent': - try: - user_remove(client, db_name, user) - except OperationFailure, e: - module.fail_json(msg='Unable to remove user: %s' % str(e)) - - module.exit_json(changed=True, user=user) - -# import module snippets -from ansible.module_utils.basic import * -main() 
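For context, the add/remove flow the deleted module implements reduces to a handful of pymongo calls. Below is a minimal standalone sketch of the same idempotency probe and role handling (an editorial illustration, not part of the diff; host and credential values are placeholders, and it assumes the era-appropriate pymongo 2.x API used above, where Database.authenticate() and Database.add_user() still exist; both are gone in modern pymongo):

    from pymongo import MongoClient

    def ensure_user(host, port, db_name, user, password, roles=None):
        client = MongoClient(host, int(port))
        db = client[db_name]
        try:
            # Same probe the module performs: if the target user can already
            # authenticate, the desired state is present and nothing changes.
            db.authenticate(user, password)
            return False
        except Exception:
            pass
        if roles is None:
            # Pre-MongoDB-2.4 style: positional read_only flag, no roles.
            db.add_user(user, password, False)
        else:
            # Role-based form; needs MongoDB 2.4+ and pymongo 2.5+.
            db.add_user(user, password, None, roles=roles)
        return True

    # Example: ensure_user('localhost', 27017, 'burgers', 'bob', '12345', roles=['readWrite'])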
diff --git a/library/database/mysql_db b/library/database/mysql_db deleted file mode 100644 index 38dee608ba5..00000000000 --- a/library/database/mysql_db +++ /dev/null @@ -1,363 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Mark Theunissen -# Sponsored by Four Kitchens http://fourkitchens.com. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: mysql_db -short_description: Add or remove MySQL databases from a remote host. -description: - - Add or remove MySQL databases from a remote host. -version_added: "0.6" -options: - name: - description: - - name of the database to add or remove - required: true - default: null - aliases: [ db ] - login_user: - description: - - The username used to authenticate with - required: false - default: null - login_password: - description: - - The password used to authenticate with - required: false - default: null - login_host: - description: - - Host running the database - required: false - default: localhost - login_port: - description: - - Port of the MySQL server. Requires login_host to be defined as something other than localhost if login_port is used - required: false - default: 3306 - login_unix_socket: - description: - - The path to a Unix domain socket for local connections - required: false - default: null - state: - description: - - The database state - required: false - default: present - choices: [ "present", "absent", "dump", "import" ] - collation: - description: - - Collation mode - required: false - default: null - encoding: - description: - - Encoding mode - required: false - default: null - target: - description: - - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL - files (C(.sql)) as well as bzip2 (C(.bz2)) and gzip (C(.gz)) compressed files are supported. - required: false -notes: - - Requires the MySQLdb Python package on the remote host. For Ubuntu, this - is as easy as apt-get install python-mysqldb. (See M(apt).) - - Both I(login_password) and I(login_user) are required when you are - passing credentials. If none are present, the module will attempt to read - the credentials from C(~/.my.cnf), and finally fall back to using the MySQL - default login of C(root) with no password. -requirements: [ ConfigParser ] -author: Mark Theunissen -''' - -EXAMPLES = ''' -# Create a new database with name 'bobdata' -- mysql_db: name=bobdata state=present - -# Copy database dump file to remote host and restore it to database 'my_db' -- copy: src=dump.sql.bz2 dest=/tmp -- mysql_db: name=my_db state=import target=/tmp/dump.sql.bz2 -''' - -import ConfigParser -import os -import pipes -try: - import MySQLdb -except ImportError: - mysqldb_found = False -else: - mysqldb_found = True - -# =========================================== -# MySQL module specific support methods.
-# - -def db_exists(cursor, db): - res = cursor.execute("SHOW DATABASES LIKE %s", (db.replace("_","\_"),)) - return bool(res) - -def db_delete(cursor, db): - query = "DROP DATABASE `%s`" % db - cursor.execute(query) - return True - -def db_dump(module, host, user, password, db_name, target, port, socket=None): - cmd = module.get_bin_path('mysqldump', True) - cmd += " --quick --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password)) - if socket is not None: - cmd += " --socket=%s" % pipes.quote(socket) - else: - cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port)) - cmd += " %s" % pipes.quote(db_name) - if os.path.splitext(target)[-1] == '.gz': - cmd = cmd + ' | gzip > ' + pipes.quote(target) - elif os.path.splitext(target)[-1] == '.bz2': - cmd = cmd + ' | bzip2 > ' + pipes.quote(target) - else: - cmd += " > %s" % pipes.quote(target) - rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - return rc, stdout, stderr - -def db_import(module, host, user, password, db_name, target, port, socket=None): - if not os.path.exists(target): - return module.fail_json(msg="target %s does not exist on the host" % target) - - cmd = module.get_bin_path('mysql', True) - cmd += " --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password)) - if socket is not None: - cmd += " --socket=%s" % pipes.quote(socket) - else: - cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port)) - cmd += " -D %s" % pipes.quote(db_name) - if os.path.splitext(target)[-1] == '.gz': - gunzip_path = module.get_bin_path('gunzip') - if gunzip_path: - rc, stdout, stderr = module.run_command('%s %s' % (gunzip_path, target)) - if rc != 0: - return rc, stdout, stderr - cmd += " < %s" % pipes.quote(os.path.splitext(target)[0]) - rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - if rc != 0: - return rc, stdout, stderr - gzip_path = module.get_bin_path('gzip') - if gzip_path: - rc, stdout, stderr = module.run_command('%s %s' % (gzip_path, os.path.splitext(target)[0])) - else: - module.fail_json(msg="gzip command not found") - else: - module.fail_json(msg="gunzip command not found") - elif os.path.splitext(target)[-1] == '.bz2': - bunzip2_path = module.get_bin_path('bunzip2') - if bunzip2_path: - rc, stdout, stderr = module.run_command('%s %s' % (bunzip2_path, target)) - if rc != 0: - return rc, stdout, stderr - cmd += " < %s" % pipes.quote(os.path.splitext(target)[0]) - rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - if rc != 0: - return rc, stdout, stderr - bzip2_path = module.get_bin_path('bzip2') - if bzip2_path: - rc, stdout, stderr = module.run_command('%s %s' % (bzip2_path, os.path.splitext(target)[0])) - else: - module.fail_json(msg="bzip2 command not found") - else: - module.fail_json(msg="bunzip2 command not found") - else: - cmd += " < %s" % pipes.quote(target) - rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - return rc, stdout, stderr - -def db_create(cursor, db, encoding, collation): - if encoding: - encoding = " CHARACTER SET %s" % encoding - if collation: - collation = " COLLATE %s" % collation - query = "CREATE DATABASE `%s`%s%s" % (db, encoding, collation) - res = cursor.execute(query) - return True - -def strip_quotes(s): - """ Remove surrounding single or double quotes - - >>> print strip_quotes('hello') - hello - >>> print strip_quotes('"hello"') - hello - >>> print strip_quotes("'hello'") - hello - >>> print strip_quotes("'hello") - 'hello - - """ - single_quote = "'" - double_quote = '"' - - 
if s.startswith(single_quote) and s.endswith(single_quote): - s = s.strip(single_quote) - elif s.startswith(double_quote) and s.endswith(double_quote): - s = s.strip(double_quote) - return s - - -def config_get(config, section, option): - """ Calls ConfigParser.get and strips quotes - - See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html - """ - return strip_quotes(config.get(section, option)) - - -def load_mycnf(): - config = ConfigParser.RawConfigParser() - mycnf = os.path.expanduser('~/.my.cnf') - if not os.path.exists(mycnf): - return False - try: - config.readfp(open(mycnf)) - except (IOError): - return False - # We support two forms of passwords in .my.cnf, both pass= and password=, - # as these are both supported by MySQL. - try: - passwd = config_get(config, 'client', 'password') - except (ConfigParser.NoOptionError): - try: - passwd = config_get(config, 'client', 'pass') - except (ConfigParser.NoOptionError): - return False - try: - creds = dict(user=config_get(config, 'client', 'user'),passwd=passwd) - except (ConfigParser.NoOptionError): - return False - return creds - -# =========================================== -# Module execution. -# - -def main(): - module = AnsibleModule( - argument_spec = dict( - login_user=dict(default=None), - login_password=dict(default=None), - login_host=dict(default="localhost"), - login_port=dict(default="3306"), - login_unix_socket=dict(default=None), - name=dict(required=True, aliases=['db']), - encoding=dict(default=""), - collation=dict(default=""), - target=dict(default=None), - state=dict(default="present", choices=["absent", "present","dump", "import"]), - ) - ) - - if not mysqldb_found: - module.fail_json(msg="the python mysqldb module is required") - - db = module.params["name"] - encoding = module.params["encoding"] - collation = module.params["collation"] - state = module.params["state"] - target = module.params["target"] - - # make sure the target path is expanded for ~ and $HOME - if target is not None: - target = os.path.expandvars(os.path.expanduser(target)) - - # Either the caller passes both a username and password with which to connect to - # mysql, or they pass neither and allow this module to read the credentials from - # ~/.my.cnf. 
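-    # For reference (editorial note, values are placeholders): the option
-    # file read here is a standard MySQL client config. A minimal ~/.my.cnf
-    # that load_mycnf() would accept looks like:
-    #
-    #     [client]
-    #     user=root
-    #     password=secret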
- login_password = module.params["login_password"] - login_user = module.params["login_user"] - if login_user is None and login_password is None: - mycnf_creds = load_mycnf() - if mycnf_creds is False: - login_user = "root" - login_password = "" - else: - login_user = mycnf_creds["user"] - login_password = mycnf_creds["passwd"] - elif login_password is None or login_user is None: - module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") - login_host = module.params["login_host"] - - if state in ['dump','import']: - if target is None: - module.fail_json(msg="with state=%s target is required" % (state)) - connect_to_db = db - else: - connect_to_db = 'mysql' - try: - if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db=connect_to_db) - elif module.params["login_port"] != "3306" and module.params["login_host"] == "localhost": - module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined") - else: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password, db=connect_to_db) - cursor = db_connection.cursor() - except Exception, e: - if "Unknown database" in str(e): - errno, errstr = e.args - module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) - else: - module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check ~/.my.cnf contains credentials") - - changed = False - if db_exists(cursor, db): - if state == "absent": - try: - changed = db_delete(cursor, db) - except Exception, e: - module.fail_json(msg="error deleting database: " + str(e)) - elif state == "dump": - rc, stdout, stderr = db_dump(module, login_host, login_user, - login_password, db, target, - port=module.params['login_port'], - socket=module.params['login_unix_socket']) - if rc != 0: - module.fail_json(msg="%s" % stderr) - else: - module.exit_json(changed=True, db=db, msg=stdout) - elif state == "import": - rc, stdout, stderr = db_import(module, login_host, login_user, - login_password, db, target, - port=module.params['login_port'], - socket=module.params['login_unix_socket']) - if rc != 0: - module.fail_json(msg="%s" % stderr) - else: - module.exit_json(changed=True, db=db, msg=stdout) - else: - if state == "present": - try: - changed = db_create(cursor, db, encoding, collation) - except Exception, e: - module.fail_json(msg="error creating database: " + str(e)) - - module.exit_json(changed=changed, db=db) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/mysql_replication b/library/database/mysql_replication deleted file mode 100644 index d965f3ce0d4..00000000000 --- a/library/database/mysql_replication +++ /dev/null @@ -1,369 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" - -Ansible module to manage mysql replication -(c) 2013, Balazs Pocze -Certain parts are taken from Mark Theunissen's mysqldb module - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. 
- -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. -You should have received a copy of the GNU General Public License -along with Ansible. If not, see <http://www.gnu.org/licenses/>. -""" - -DOCUMENTATION = ''' ---- -module: mysql_replication - -short_description: Manage MySQL replication -description: - - Manages MySQL server replication: get slave or master status, change the master host, and start or stop the slave thread. -version_added: "1.3" -options: - mode: - description: - - module operating mode. Can be getslave (SHOW SLAVE STATUS), getmaster (SHOW MASTER STATUS), changemaster (CHANGE MASTER TO), startslave (START SLAVE) or stopslave (STOP SLAVE) - required: False - choices: - - getslave - - getmaster - - changemaster - - stopslave - - startslave - default: getslave - login_user: - description: - - username used to connect to the MySQL host; if defined, login_password is also required. - required: False - login_password: - description: - - password used to connect to the MySQL host; if defined, login_user is also required. - required: False - login_host: - description: - - MySQL host to connect to - required: False - login_unix_socket: - description: - - Unix socket used to connect to the MySQL server - master_host: - description: - - same as mysql variable - master_user: - description: - - same as mysql variable - master_password: - description: - - same as mysql variable - master_port: - description: - - same as mysql variable - master_connect_retry: - description: - - same as mysql variable - master_log_file: - description: - - same as mysql variable - master_log_pos: - description: - - same as mysql variable - relay_log_file: - description: - - same as mysql variable - relay_log_pos: - description: - - same as mysql variable - master_ssl: - description: - - same as mysql variable - possible values: 0,1 - master_ssl_ca: - description: - - same as mysql variable - master_ssl_capath: - description: - - same as mysql variable - master_ssl_cert: - description: - - same as mysql variable - master_ssl_key: - description: - - same as mysql variable - master_ssl_cipher: - description: - - same as mysql variable - -''' - -EXAMPLES = ''' -# Stop mysql slave thread -- mysql_replication: mode=stopslave - -# Get master binlog file name and binlog position -- mysql_replication: mode=getmaster - -# Change master to master server 192.168.1.1 and use binary log 'mysql-bin.000009' with position 4578 -- mysql_replication: mode=changemaster master_host=192.168.1.1 master_log_file=mysql-bin.000009 master_log_pos=4578 -''' - -import ConfigParser -import getpass -import os -import warnings - -try: - import MySQLdb -except ImportError: - mysqldb_found = False -else: - mysqldb_found = True - - -def get_master_status(cursor): - cursor.execute("SHOW MASTER STATUS") - masterstatus = cursor.fetchone() - return masterstatus - - -def get_slave_status(cursor): - cursor.execute("SHOW SLAVE STATUS") - slavestatus = cursor.fetchone() - return slavestatus - - -def stop_slave(cursor): - try: - cursor.execute("STOP SLAVE") - stopped = True - except Exception: - stopped = False - return stopped - - -def start_slave(cursor): - try: - cursor.execute("START SLAVE") - started = True - except Exception: - started = False - return started - - -def changemaster(cursor, chm): - SQLPARAM = ",".join(chm) - cursor.execute("CHANGE MASTER TO " + SQLPARAM) - - -def strip_quotes(s): - """ Remove surrounding single or double quotes - - >>> print strip_quotes('hello') - hello - >>> print
strip_quotes('"hello"') - hello - >>> print strip_quotes("'hello'") - hello - >>> print strip_quotes("'hello") - 'hello - - """ - single_quote = "'" - double_quote = '"' - - if s.startswith(single_quote) and s.endswith(single_quote): - s = s.strip(single_quote) - elif s.startswith(double_quote) and s.endswith(double_quote): - s = s.strip(double_quote) - return s - - -def config_get(config, section, option): - """ Calls ConfigParser.get and strips quotes - - See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html - """ - return strip_quotes(config.get(section, option)) - - -def load_mycnf(): - config = ConfigParser.RawConfigParser() - mycnf = os.path.expanduser('~/.my.cnf') - if not os.path.exists(mycnf): - return False - try: - config.readfp(open(mycnf)) - except (IOError): - return False - # We support two forms of passwords in .my.cnf, both pass= and password=, - # as these are both supported by MySQL. - try: - passwd = config_get(config, 'client', 'password') - except (ConfigParser.NoOptionError): - try: - passwd = config_get(config, 'client', 'pass') - except (ConfigParser.NoOptionError): - return False - - # If .my.cnf doesn't specify a user, default to user login name - try: - user = config_get(config, 'client', 'user') - except (ConfigParser.NoOptionError): - user = getpass.getuser() - creds = dict(user=user, passwd=passwd) - return creds - - -def main(): - module = AnsibleModule( - argument_spec = dict( - login_user=dict(default=None), - login_password=dict(default=None), - login_host=dict(default="localhost"), - login_unix_socket=dict(default=None), - mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave"]), - master_host=dict(default=None), - master_user=dict(default=None), - master_password=dict(default=None), - master_port=dict(default=None), - master_connect_retry=dict(default=None), - master_log_file=dict(default=None), - master_log_pos=dict(default=None), - relay_log_file=dict(default=None), - relay_log_pos=dict(default=None), - master_ssl=dict(default=False, type='bool'), - master_ssl_ca=dict(default=None), - master_ssl_capath=dict(default=None), - master_ssl_cert=dict(default=None), - master_ssl_key=dict(default=None), - master_ssl_cipher=dict(default=None), - ) - ) - user = module.params["login_user"] - password = module.params["login_password"] - host = module.params["login_host"] - mode = module.params["mode"] - master_host = module.params["master_host"] - master_user = module.params["master_user"] - master_password = module.params["master_password"] - master_port = module.params["master_port"] - master_connect_retry = module.params["master_connect_retry"] - master_log_file = module.params["master_log_file"] - master_log_pos = module.params["master_log_pos"] - relay_log_file = module.params["relay_log_file"] - relay_log_pos = module.params["relay_log_pos"] - master_ssl = module.params["master_ssl"] - master_ssl_ca = module.params["master_ssl_ca"] - master_ssl_capath = module.params["master_ssl_capath"] - master_ssl_cert = module.params["master_ssl_cert"] - master_ssl_key = module.params["master_ssl_key"] - master_ssl_cipher = module.params["master_ssl_cipher"] - - if not mysqldb_found: - module.fail_json(msg="the python mysqldb module is required") - else: - warnings.filterwarnings('error', category=MySQLdb.Warning) - - # Either the caller passes both a username and password with which to connect to - # mysql, or they pass neither and allow this module to read the credentials from - # ~/.my.cnf. 
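# Editor's note: a standalone illustration (assumed, not from the module) of
# how load_mycnf() above reads a MySQL option file, including the
# strip_quotes() handling of quoted values described at
# http://dev.mysql.com/doc/refman/5.0/en/option-files.html
import ConfigParser
import StringIO

sample = StringIO.StringIO("[client]\nuser=root\npassword='secret#pass'\n")
config = ConfigParser.RawConfigParser()
config.readfp(sample)
passwd = config.get('client', 'password')
# ConfigParser returns the raw value, quotes included; strip them the way the
# mysql client itself would.
if passwd.startswith("'") and passwd.endswith("'"):
    passwd = passwd.strip("'")
assert passwd == 'secret#pass'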
- login_password = module.params["login_password"] - login_user = module.params["login_user"] - if login_user is None and login_password is None: - mycnf_creds = load_mycnf() - if mycnf_creds is False: - login_user = "root" - login_password = "" - else: - login_user = mycnf_creds["user"] - login_password = mycnf_creds["passwd"] - elif login_password is None or login_user is None: - module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") - - try: - if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") - else: - db_connection = MySQLdb.connect(host=module.params["login_host"], user=login_user, passwd=login_password, db="mysql") - except Exception, e: - module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") - try: - cursor = db_connection.cursor(cursorclass=MySQLdb.cursors.DictCursor) - except Exception, e: - module.fail_json(msg="Trouble getting DictCursor from db_connection: %s" % e) - - if mode == "getmaster": - masterstatus = get_master_status(cursor) - try: - module.exit_json( **masterstatus ) - except TypeError: - module.fail_json(msg="Server is not configured as mysql master") - - elif mode == "getslave": - slavestatus = get_slave_status(cursor) - try: - module.exit_json( **slavestatus ) - except TypeError: - module.fail_json(msg="Server is not configured as mysql slave") - - elif mode == "changemaster": - chm = [] - if master_host: - chm.append("MASTER_HOST='" + master_host + "'") - if master_user: - chm.append("MASTER_USER='" + master_user + "'") - if master_password: - chm.append("MASTER_PASSWORD='" + master_password + "'") - if master_port: - chm.append("MASTER_PORT=" + master_port) - if master_connect_retry: - chm.append("MASTER_CONNECT_RETRY='" + master_connect_retry + "'") - if master_log_file: - chm.append("MASTER_LOG_FILE='" + master_log_file + "'") - if master_log_pos: - chm.append("MASTER_LOG_POS=" + master_log_pos) - if relay_log_file: - chm.append("RELAY_LOG_FILE='" + relay_log_file + "'") - if relay_log_pos: - chm.append("RELAY_LOG_POS=" + relay_log_pos) - if master_ssl: - chm.append("MASTER_SSL=1") - if master_ssl_ca: - chm.append("MASTER_SSL_CA='" + master_ssl_ca + "'") - if master_ssl_capath: - chm.append("MASTER_SSL_CAPATH='" + master_ssl_capath + "'") - if master_ssl_cert: - chm.append("MASTER_SSL_CERT='" + master_ssl_cert + "'") - if master_ssl_key: - chm.append("MASTER_SSL_KEY='" + master_ssl_key + "'") - if master_ssl_cipher: - chm.append("MASTER_SSL_CIPHER='" + master_ssl_cipher + "'") - changemaster(cursor, chm) - module.exit_json(changed=True) - elif mode == "startslave": - started = start_slave(cursor) - if started is True: - module.exit_json(msg="Slave started", changed=True) - else: - module.exit_json(msg="Slave already started (Or cannot be started)", changed=False) - elif mode == "stopslave": - stopped = stop_slave(cursor) - if stopped is True: - module.exit_json(msg="Slave stopped", changed=True) - else: - module.exit_json(msg="Slave already stopped", changed=False) - -# import module snippets -from ansible.module_utils.basic import * -main() -warnings.simplefilter("ignore") \ No newline at end of file diff --git a/library/database/mysql_user b/library/database/mysql_user deleted file mode 100644 index aaec05f99f5..00000000000 ---
a/library/database/mysql_user +++ /dev/null @@ -1,476 +0,0 @@ -#!/usr/bin/python - -# (c) 2012, Mark Theunissen -# Sponsored by Four Kitchens http://fourkitchens.com. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' ---- -module: mysql_user -short_description: Adds or removes a user from a MySQL database. -description: - - Adds or removes a user from a MySQL database. -version_added: "0.6" -options: - name: - description: - - name of the user (role) to add or remove - required: true - default: null - password: - description: - - set the user's password - required: false - default: null - host: - description: - - the 'host' part of the MySQL username - required: false - default: localhost - login_user: - description: - - The username used to authenticate with - required: false - default: null - login_password: - description: - - The password used to authenticate with - required: false - default: null - login_host: - description: - - Host running the database - required: false - default: localhost - login_port: - description: - - Port of the MySQL server - required: false - default: 3306 - version_added: '1.4' - login_unix_socket: - description: - - The path to a Unix domain socket for local connections - required: false - default: null - priv: - description: - - "MySQL privileges string in the format: C(db.table:priv1,priv2)" - required: false - default: null - append_privs: - description: - - Append the privileges defined by priv to the existing ones for this - user instead of overwriting existing ones. - required: false - choices: [ "yes", "no" ] - default: "no" - version_added: "1.4" - state: - description: - - Whether the user should exist. When C(absent), removes - the user. - required: false - default: present - choices: [ "present", "absent" ] - check_implicit_admin: - description: - - Check if mysql allows login as root/nopassword before trying supplied credentials. - required: false - default: false - version_added: "1.3" -notes: - - Requires the MySQLdb Python package on the remote host. For Ubuntu, this - is as easy as apt-get install python-mysqldb. - - Both C(login_user) and C(login_password) are required when you are - passing credentials. If none are present, the module will attempt to read - the credentials from C(~/.my.cnf), and finally fall back to using the MySQL - default login of 'root' with no password. - - "MySQL server installs with default login_user of 'root' and no password. To secure this user - as part of an idempotent playbook, you must create at least two tasks: the first must change the root user's password, - without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing - the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from - the file."
- -requirements: [ "ConfigParser", "MySQLdb" ] -author: Mark Theunissen -''' - -EXAMPLES = """ -# Create database user with name 'bob' and password '12345' with all database privileges -- mysql_user: name=bob password=12345 priv=*.*:ALL state=present - -# Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION' -- mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present - -# Ensure no user named 'sally' exists, also passing in the auth credentials. -- mysql_user: login_user=root login_password=123456 name=sally state=absent - -# Specify grants composed of more than one word -- mysql_user: name=replication password=12345 priv=*.*:"REPLICATION CLIENT" state=present - -# Revoke all privileges for user 'bob' and password '12345' -- mysql_user: name=bob password=12345 priv=*.*:USAGE state=present - -# Example privileges string format -mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL - -# Example using login_unix_socket to connect to server -- mysql_user: name=root password=abc123 login_unix_socket=/var/run/mysqld/mysqld.sock - -# Example .my.cnf file for setting the root password -# Note: don't use quotes around the password, because the mysql_user module -# will include them in the password but the mysql client will not - -[client] -user=root -password=n<_665{vS43y -""" - -import ConfigParser -import getpass -import tempfile -try: - import MySQLdb -except ImportError: - mysqldb_found = False -else: - mysqldb_found = True - -# =========================================== -# MySQL module specific support methods. -# - -def user_exists(cursor, user, host): - cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host)) - count = cursor.fetchone() - return count[0] > 0 - -def user_add(cursor, user, host, password, new_priv): - cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password)) - if new_priv is not None: - for db_table, priv in new_priv.iteritems(): - privileges_grant(cursor, user,host,db_table,priv) - return True - -def user_mod(cursor, user, host, password, new_priv, append_privs): - changed = False - grant_option = False - - # Handle passwords. - if password is not None: - cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host)) - current_pass_hash = cursor.fetchone() - cursor.execute("SELECT PASSWORD(%s)", (password,)) - new_pass_hash = cursor.fetchone() - if current_pass_hash[0] != new_pass_hash[0]: - cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password)) - changed = True - - # Handle privileges. - if new_priv is not None: - curr_priv = privileges_get(cursor, user,host) - - # If the user has privileges on a db.table that doesn't appear at all in - # the new specification, then revoke all privileges on it. - for db_table, priv in curr_priv.iteritems(): - # If the user has the GRANT OPTION on a db.table, revoke it first. - if "GRANT" in priv: - grant_option = True - if db_table not in new_priv: - if user != "root" and "PROXY" not in priv and not append_privs: - privileges_revoke(cursor, user,host,db_table,grant_option) - changed = True - - # If the user doesn't currently have any privileges on a db.table, then - # we can perform a straight grant operation. 
- for db_table, priv in new_priv.iteritems(): - if db_table not in curr_priv: - privileges_grant(cursor, user,host,db_table,priv) - changed = True - - # If the db.table specification exists in both the user's current privileges - # and in the new privileges, then we need to see if there's a difference. - db_table_intersect = set(new_priv.keys()) & set(curr_priv.keys()) - for db_table in db_table_intersect: - priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table]) - if (len(priv_diff) > 0): - if not append_privs: - privileges_revoke(cursor, user,host,db_table,grant_option) - privileges_grant(cursor, user,host,db_table,new_priv[db_table]) - changed = True - - return changed - -def user_delete(cursor, user, host): - cursor.execute("DROP USER %s@%s", (user,host)) - return True - -def privileges_get(cursor, user,host): - """ MySQL doesn't have a better method of getting privileges aside from the - SHOW GRANTS query syntax, which requires us to then parse the returned string. - Here's an example of the string that is returned from MySQL: - - GRANT USAGE ON *.* TO 'user'@'localhost' IDENTIFIED BY 'pass'; - - This function makes the query and returns a dictionary containing the results. - The dictionary format is the same as that returned by privileges_unpack() below. - """ - output = {} - cursor.execute("SHOW GRANTS FOR %s@%s", (user,host)) - grants = cursor.fetchall() - - def pick(x): - if x == 'ALL PRIVILEGES': - return 'ALL' - else: - return x - - for grant in grants: - res = re.match("GRANT (.+) ON (.+) TO '.+'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0]) - if res is None: - module.fail_json(msg="unable to parse the MySQL grant string") - privileges = res.group(1).split(", ") - privileges = [ pick(x) for x in privileges] - if "WITH GRANT OPTION" in res.group(4): - privileges.append('GRANT') - db = res.group(2) - output[db] = privileges - return output - -def privileges_unpack(priv): - """ Take a privileges string, typically passed as a parameter, and unserialize - it into a dictionary, the same format as privileges_get() above. We have this - custom format to avoid using YAML/JSON strings inside YAML playbooks. Example - of a privileges string: - - mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanother.*:ALL - - The privilege USAGE stands for no privileges, so we add that in on *.* if it's - not specified in the string, as MySQL will always provide this by default. - """ - output = {} - for item in priv.split('/'): - pieces = item.split(':') - if '.' 
in pieces[0]: - pieces[0] = pieces[0].split('.') - for idx, piece in enumerate(pieces): - if pieces[0][idx] != "*": - pieces[0][idx] = "`" + pieces[0][idx] + "`" - pieces[0] = '.'.join(pieces[0]) - - output[pieces[0]] = pieces[1].upper().split(',') - - if '*.*' not in output: - output['*.*'] = ['USAGE'] - - return output - -def privileges_revoke(cursor, user,host,db_table,grant_option): - if grant_option: - query = "REVOKE GRANT OPTION ON %s FROM '%s'@'%s'" % (db_table,user,host) - cursor.execute(query) - query = "REVOKE ALL PRIVILEGES ON %s FROM '%s'@'%s'" % (db_table,user,host) - cursor.execute(query) - -def privileges_grant(cursor, user,host,db_table,priv): - - priv_string = ",".join(filter(lambda x: x != 'GRANT', priv)) - query = "GRANT %s ON %s TO '%s'@'%s'" % (priv_string,db_table,user,host) - if 'GRANT' in priv: - query = query + " WITH GRANT OPTION" - cursor.execute(query) - - -def strip_quotes(s): - """ Remove surrounding single or double quotes - - >>> print strip_quotes('hello') - hello - >>> print strip_quotes('"hello"') - hello - >>> print strip_quotes("'hello'") - hello - >>> print strip_quotes("'hello") - 'hello - - """ - single_quote = "'" - double_quote = '"' - - if s.startswith(single_quote) and s.endswith(single_quote): - s = s.strip(single_quote) - elif s.startswith(double_quote) and s.endswith(double_quote): - s = s.strip(double_quote) - return s - - -def config_get(config, section, option): - """ Calls ConfigParser.get and strips quotes - - See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html - """ - return strip_quotes(config.get(section, option)) - - -def _safe_cnf_load(config, path): - - data = {'user':'', 'password':''} - - # read in user/pass - f = open(path, 'r') - for line in f.readlines(): - line = line.strip() - if line.startswith('user='): - data['user'] = line.split('=', 1)[1].strip() - if line.startswith('password=') or line.startswith('pass='): - data['password'] = line.split('=', 1)[1].strip() - f.close() - - # write out a new cnf file with only user/pass - fh, newpath = tempfile.mkstemp(prefix=path + '.') - f = open(newpath, 'wb') - f.write('[client]\n') - f.write('user=%s\n' % data['user']) - f.write('password=%s\n' % data['password']) - f.close() - - config.readfp(open(newpath)) - os.remove(newpath) - return config - -def load_mycnf(): - config = ConfigParser.RawConfigParser() - mycnf = os.path.expanduser('~/.my.cnf') - if not os.path.exists(mycnf): - return False - try: - config.readfp(open(mycnf)) - except (IOError): - return False - except: - config = _safe_cnf_load(config, mycnf) - - # We support two forms of passwords in .my.cnf, both pass= and password=, - # as these are both supported by MySQL. 
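# Editor's note: a self-contained mirror (assumed, for illustration only) of
# the privileges_unpack() logic defined above, showing how the module's custom
# priv string format turns into a dict keyed by backquoted db.table specs.
def unpack(priv):
    output = {}
    for item in priv.split('/'):
        pieces = item.split(':')
        if '.' in pieces[0]:
            db, table = pieces[0].split('.')
            # Quote each part with backticks unless it is a wildcard.
            db = db if db == '*' else '`%s`' % db
            table = table if table == '*' else '`%s`' % table
            pieces[0] = '%s.%s' % (db, table)
        output[pieces[0]] = pieces[1].upper().split(',')
    if '*.*' not in output:
        output['*.*'] = ['USAGE']  # USAGE means "no privileges", always implied
    return output

result = unpack('mydb.*:INSERT,UPDATE/anotherdb.*:SELECT')
assert result['`mydb`.*'] == ['INSERT', 'UPDATE']
assert result['`anotherdb`.*'] == ['SELECT']
assert result['*.*'] == ['USAGE']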
- try: - passwd = config_get(config, 'client', 'password') - except (ConfigParser.NoOptionError): - try: - passwd = config_get(config, 'client', 'pass') - except (ConfigParser.NoOptionError): - return False - - # If .my.cnf doesn't specify a user, default to user login name - try: - user = config_get(config, 'client', 'user') - except (ConfigParser.NoOptionError): - user = getpass.getuser() - creds = dict(user=user,passwd=passwd) - return creds - -def connect(module, login_user, login_password): - if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") - else: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password, db="mysql") - return db_connection.cursor() - -# =========================================== -# Module execution. -# - -def main(): - module = AnsibleModule( - argument_spec = dict( - login_user=dict(default=None), - login_password=dict(default=None), - login_host=dict(default="localhost"), - login_port=dict(default="3306"), - login_unix_socket=dict(default=None), - user=dict(required=True, aliases=['name']), - password=dict(default=None), - host=dict(default="localhost"), - state=dict(default="present", choices=["absent", "present"]), - priv=dict(default=None), - append_privs=dict(type="bool", default="no"), - check_implicit_admin=dict(default=False), - ) - ) - user = module.params["user"] - password = module.params["password"] - host = module.params["host"] - state = module.params["state"] - priv = module.params["priv"] - check_implicit_admin = module.params['check_implicit_admin'] - append_privs = module.boolean(module.params["append_privs"]) - - if not mysqldb_found: - module.fail_json(msg="the python mysqldb module is required") - - if priv is not None: - try: - priv = privileges_unpack(priv) - except: - module.fail_json(msg="invalid privileges string") - - # Either the caller passes both a username and password with which to connect to - # mysql, or they pass neither and allow this module to read the credentials from - # ~/.my.cnf. 
- login_password = module.params["login_password"] - login_user = module.params["login_user"] - if login_user is None and login_password is None: - mycnf_creds = load_mycnf() - if mycnf_creds is False: - login_user = "root" - login_password = "" - else: - login_user = mycnf_creds["user"] - login_password = mycnf_creds["passwd"] - elif login_password is None or login_user is None: - module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") - - cursor = None - try: - if check_implicit_admin: - try: - cursor = connect(module, 'root', '') - except: - pass - - if not cursor: - cursor = connect(module, login_user, login_password) - except Exception, e: - module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") - - if state == "present": - if user_exists(cursor, user, host): - changed = user_mod(cursor, user, host, password, priv, append_privs) - else: - if password is None: - module.fail_json(msg="password parameter required when adding a user") - changed = user_add(cursor, user, host, password, priv) - elif state == "absent": - if user_exists(cursor, user, host): - changed = user_delete(cursor, user, host) - else: - changed = False - module.exit_json(changed=changed, user=user) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/mysql_variables b/library/database/mysql_variables deleted file mode 100644 index 7353fdd485d..00000000000 --- a/library/database/mysql_variables +++ /dev/null @@ -1,253 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" - -Ansible module to manage mysql variables -(c) 2013, Balazs Pocze -Certain parts are taken from Mark Theunissen's mysqldb module - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: mysql_variables - -short_description: Manage MySQL global variables -description: - - Query / Set MySQL variables -version_added: 1.3 -options: - variable: - description: - - Variable name to operate - required: True - value: - description: - - If set, then sets variable value to this - required: False - login_user: - description: - - username to connect mysql host, if defined login_password also needed. - required: False - login_password: - description: - - password to connect mysql host, if defined login_user also needed. 
- required: False - login_host: - description: - - mysql host to connect - required: False - login_unix_socket: - description: - - unix socket to connect mysql server -''' -EXAMPLES = ''' -# Check for sync_binlog setting -- mysql_variables: variable=sync_binlog - -# Set read_only variable to 1 -- mysql_variables: variable=read_only value=1 -''' - - -import ConfigParser -import os -import warnings - -try: - import MySQLdb -except ImportError: - mysqldb_found = False -else: - mysqldb_found = True - - -def typedvalue(value): - """ - Convert value to number whenever possible, return same value - otherwise. - - >>> typedvalue('3') - 3 - >>> typedvalue('3.0') - 3.0 - >>> typedvalue('foobar') - 'foobar' - - """ - try: - return int(value) - except ValueError: - pass - - try: - return float(value) - except ValueError: - pass - - return value - - -def getvariable(cursor, mysqlvar): - cursor.execute("SHOW VARIABLES LIKE '" + mysqlvar + "'") - mysqlvar_val = cursor.fetchall() - return mysqlvar_val - - -def setvariable(cursor, mysqlvar, value): - """ Set a global mysql variable to a given value - - The DB driver will handle quoting of the given value based on its - type, thus numeric strings like '3.0' or '8' are illegal, they - should be passed as numeric literals. - - """ - try: - cursor.execute("SET GLOBAL " + mysqlvar + " = %s", (value,)) - cursor.fetchall() - result = True - except Exception, e: - result = str(e) - return result - - -def strip_quotes(s): - """ Remove surrounding single or double quotes - - >>> print strip_quotes('hello') - hello - >>> print strip_quotes('"hello"') - hello - >>> print strip_quotes("'hello'") - hello - >>> print strip_quotes("'hello") - 'hello - - """ - single_quote = "'" - double_quote = '"' - - if s.startswith(single_quote) and s.endswith(single_quote): - s = s.strip(single_quote) - elif s.startswith(double_quote) and s.endswith(double_quote): - s = s.strip(double_quote) - return s - - -def config_get(config, section, option): - """ Calls ConfigParser.get and strips quotes - - See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html - """ - return strip_quotes(config.get(section, option)) - - -def load_mycnf(): - config = ConfigParser.RawConfigParser() - mycnf = os.path.expanduser('~/.my.cnf') - if not os.path.exists(mycnf): - return False - try: - config.readfp(open(mycnf)) - except (IOError): - return False - # We support two forms of passwords in .my.cnf, both pass= and password=, - # as these are both supported by MySQL. 
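# Editor's note: a minimal, standalone sketch (not part of the module) of the
# idempotence check main() performs further below: both the wanted and the
# current value go through typedvalue()-style coercion, so the string "1"
# from a playbook compares equal to the server's integer 1.
def coerce(value):
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            pass
    return value

assert coerce('3') == 3
assert coerce('3.0') == 3.0
assert coerce('foobar') == 'foobar'
# No SET GLOBAL needs to be issued when the values already match:
assert coerce('1') == coerce(1)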
- try: - passwd = config_get(config, 'client', 'password') - except (ConfigParser.NoOptionError): - try: - passwd = config_get(config, 'client', 'pass') - except (ConfigParser.NoOptionError): - return False - - # If .my.cnf doesn't specify a user, default to user login name - try: - user = config_get(config, 'client', 'user') - except (ConfigParser.NoOptionError): - user = getpass.getuser() - creds = dict(user=user, passwd=passwd) - return creds - - -def main(): - module = AnsibleModule( - argument_spec = dict( - login_user=dict(default=None), - login_password=dict(default=None), - login_host=dict(default="localhost"), - login_unix_socket=dict(default=None), - variable=dict(default=None), - value=dict(default=None) - - ) - ) - user = module.params["login_user"] - password = module.params["login_password"] - host = module.params["login_host"] - mysqlvar = module.params["variable"] - value = module.params["value"] - if not mysqldb_found: - module.fail_json(msg="the python mysqldb module is required") - else: - warnings.filterwarnings('error', category=MySQLdb.Warning) - - # Either the caller passes both a username and password with which to connect to - # mysql, or they pass neither and allow this module to read the credentials from - # ~/.my.cnf. - login_password = module.params["login_password"] - login_user = module.params["login_user"] - if login_user is None and login_password is None: - mycnf_creds = load_mycnf() - if mycnf_creds is False: - login_user = "root" - login_password = "" - else: - login_user = mycnf_creds["user"] - login_password = mycnf_creds["passwd"] - elif login_password is None or login_user is None: - module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") - try: - if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") - else: - db_connection = MySQLdb.connect(host=module.params["login_host"], user=login_user, passwd=login_password, db="mysql") - cursor = db_connection.cursor() - except Exception, e: - module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") - if mysqlvar is None: - module.fail_json(msg="Cannot run without variable to operate with") - mysqlvar_val = getvariable(cursor, mysqlvar) - if value is None: - module.exit_json(msg=mysqlvar_val) - else: - if len(mysqlvar_val) < 1: - module.fail_json(msg="Variable not available", changed=False) - # Type values before using them - value_wanted = typedvalue(value) - value_actual = typedvalue(mysqlvar_val[0][1]) - if value_wanted == value_actual: - module.exit_json(msg="Variable already set to requested value", changed=False) - result = setvariable(cursor, mysqlvar, value_wanted) - if result is True: - module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True) - else: - module.fail_json(msg=result, changed=False) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/postgresql_db b/library/database/postgresql_db deleted file mode 100644 index 605be621601..00000000000 --- a/library/database/postgresql_db +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by 
-# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' ---- -module: postgresql_db -short_description: Add or remove PostgreSQL databases from a remote host. -description: - - Add or remove PostgreSQL databases from a remote host. -version_added: "0.6" -options: - name: - description: - - name of the database to add or remove - required: true - default: null - login_user: - description: - - The username used to authenticate with - required: false - default: null - login_password: - description: - - The password used to authenticate with - required: false - default: null - login_host: - description: - - Host running the database - required: false - default: localhost - owner: - description: - - Name of the role to set as owner of the database - required: false - default: null - port: - description: - - Database port to connect to. - required: false - default: 5432 - template: - description: - - Template used to create the database - required: false - default: null - encoding: - description: - - Encoding of the database - required: false - default: null - lc_collate: - description: - - Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template. - required: false - default: null - lc_ctype: - description: - - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0) is used as template. - required: false - default: null - state: - description: - - The database state - required: false - default: present - choices: [ "present", "absent" ] -notes: - - The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host. - - This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on - the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module. -requirements: [ psycopg2 ] -author: Lorin Hochstein -''' - -EXAMPLES = ''' -# Create a new database with name "acme" -- postgresql_db: name=acme - -# Create a new database with name "acme" and specific encoding and locale -# settings. If a template different from "template0" is specified, encoding -# and locale settings must match those of the template. -- postgresql_db: name=acme - encoding='UTF-8' - lc_collate='de_DE.UTF-8' - lc_ctype='de_DE.UTF-8' - template='template0' -''' - -try: - import psycopg2 - import psycopg2.extras -except ImportError: - postgresqldb_found = False -else: - postgresqldb_found = True - -class NotSupportedError(Exception): - pass - - -# =========================================== -# PostgreSQL module specific support methods.
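# Editor's note: a small, standalone sketch (not part of the module) of the
# optional-fragment technique db_create() below uses to build its CREATE
# DATABASE statement: each parameter contributes its SQL clause only when set.
def build_create_database(db, owner='', template='', encoding=''):
    owner = ' OWNER "%s"' % owner if owner else ''
    template = ' TEMPLATE "%s"' % template if template else ''
    encoding = " ENCODING '%s'" % encoding if encoding else ''
    return 'CREATE DATABASE "%s"%s%s%s' % (db, owner, template, encoding)

assert build_create_database('acme') == 'CREATE DATABASE "acme"'
assert build_create_database('acme', owner='bob', encoding='UTF-8') == \
    'CREATE DATABASE "acme" OWNER "bob" ENCODING \'UTF-8\''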
-# - -def set_owner(cursor, db, owner): - query = "ALTER DATABASE \"%s\" OWNER TO \"%s\"" % (db, owner) - cursor.execute(query) - return True - -def get_encoding_id(cursor, encoding): - query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;" - cursor.execute(query, {'encoding': encoding}) - return cursor.fetchone()['encoding_id'] - -def get_db_info(cursor, db): - query = """ - SELECT rolname AS owner, - pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id, - datcollate AS lc_collate, datctype AS lc_ctype - FROM pg_database JOIN pg_roles ON pg_roles.oid = pg_database.datdba - WHERE datname = %(db)s - """ - cursor.execute(query, {'db':db}) - return cursor.fetchone() - -def db_exists(cursor, db): - query = "SELECT * FROM pg_database WHERE datname=%(db)s" - cursor.execute(query, {'db': db}) - return cursor.rowcount == 1 - -def db_delete(cursor, db): - if db_exists(cursor, db): - query = "DROP DATABASE \"%s\"" % db - cursor.execute(query) - return True - else: - return False - -def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype): - if not db_exists(cursor, db): - if owner: - owner = " OWNER \"%s\"" % owner - if template: - template = " TEMPLATE \"%s\"" % template - if encoding: - encoding = " ENCODING '%s'" % encoding - if lc_collate: - lc_collate = " LC_COLLATE '%s'" % lc_collate - if lc_ctype: - lc_ctype = " LC_CTYPE '%s'" % lc_ctype - query = 'CREATE DATABASE "%s"%s%s%s%s%s' % (db, owner, - template, encoding, - lc_collate, lc_ctype) - cursor.execute(query) - return True - else: - db_info = get_db_info(cursor, db) - if (encoding and - get_encoding_id(cursor, encoding) != db_info['encoding_id']): - raise NotSupportedError( - 'Changing database encoding is not supported. ' - 'Current encoding: %s' % db_info['encoding'] - ) - elif lc_collate and lc_collate != db_info['lc_collate']: - raise NotSupportedError( - 'Changing LC_COLLATE is not supported. ' - 'Current LC_COLLATE: %s' % db_info['lc_collate'] - ) - elif lc_ctype and lc_ctype != db_info['lc_ctype']: - raise NotSupportedError( - 'Changing LC_CTYPE is not supported.' - 'Current LC_CTYPE: %s' % db_info['lc_ctype'] - ) - elif owner and owner != db_info['owner']: - return set_owner(cursor, db, owner) - else: - return False - -def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype): - if not db_exists(cursor, db): - return False - else: - db_info = get_db_info(cursor, db) - if (encoding and - get_encoding_id(cursor, encoding) != db_info['encoding_id']): - return False - elif lc_collate and lc_collate != db_info['lc_collate']: - return False - elif lc_ctype and lc_ctype != db_info['lc_ctype']: - return False - elif owner and owner != db_info['owner']: - return False - else: - return True - -# =========================================== -# Module execution. 
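# Editor's note: an illustrative sketch (assumed) of the keyword filtering
# used in main() below. psycopg2.connect() falls back to libpq defaults
# (local Unix socket, current OS user) only for keyword arguments that are
# absent, so empty module parameters must be dropped rather than passed as ''.
params_map = {
    "login_host": "host",
    "login_user": "user",
    "login_password": "password",
    "port": "port",
}
module_params = {"login_host": "", "login_user": "postgres",
                 "login_password": "", "port": "5432", "db": "acme"}
kw = dict((params_map[k], v) for (k, v) in module_params.iteritems()
          if k in params_map and v != '')
assert kw == {"user": "postgres", "port": "5432"}
# psycopg2.connect(database="template1", **kw) would then connect over the
# local Unix socket using the default password lookup.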
-# - -def main(): - module = AnsibleModule( - argument_spec=dict( - login_user=dict(default="postgres"), - login_password=dict(default=""), - login_host=dict(default=""), - port=dict(default="5432"), - db=dict(required=True, aliases=['name']), - owner=dict(default=""), - template=dict(default=""), - encoding=dict(default=""), - lc_collate=dict(default=""), - lc_ctype=dict(default=""), - state=dict(default="present", choices=["absent", "present"]), - ), - supports_check_mode = True - ) - - if not postgresqldb_found: - module.fail_json(msg="the python psycopg2 module is required") - - db = module.params["db"] - port = module.params["port"] - owner = module.params["owner"] - template = module.params["template"] - encoding = module.params["encoding"] - lc_collate = module.params["lc_collate"] - lc_ctype = module.params["lc_ctype"] - state = module.params["state"] - changed = False - - # To use defaults values, keyword arguments must be absent, so - # check which values are empty and don't include in the **kw - # dictionary - params_map = { - "login_host":"host", - "login_user":"user", - "login_password":"password", - "port":"port" - } - kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() - if k in params_map and v != '' ) - try: - db_connection = psycopg2.connect(database="template1", **kw) - # Enable autocommit so we can create databases - if psycopg2.__version__ >= '2.4.2': - db_connection.autocommit = True - else: - db_connection.set_isolation_level(psycopg2 - .extensions - .ISOLATION_LEVEL_AUTOCOMMIT) - cursor = db_connection.cursor( - cursor_factory=psycopg2.extras.DictCursor) - except Exception, e: - module.fail_json(msg="unable to connect to database: %s" % e) - - try: - if module.check_mode: - if state == "absent": - changed = not db_exists(cursor, db) - elif state == "present": - changed = not db_matches(cursor, db, owner, template, encoding, - lc_collate, lc_ctype) - module.exit_json(changed=changed,db=db) - - if state == "absent": - changed = db_delete(cursor, db) - - elif state == "present": - changed = db_create(cursor, db, owner, template, encoding, - lc_collate, lc_ctype) - except NotSupportedError, e: - module.fail_json(msg=str(e)) - except Exception, e: - module.fail_json(msg="Database query failed: %s" % e) - - module.exit_json(changed=changed, db=db) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/postgresql_privs b/library/database/postgresql_privs deleted file mode 100644 index de5fa94fa48..00000000000 --- a/library/database/postgresql_privs +++ /dev/null @@ -1,613 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ ---- -module: postgresql_privs -version_added: "1.2" -short_description: Grant or revoke privileges on PostgreSQL database objects. -description: - - Grant or revoke privileges on PostgreSQL database objects. 
- - This module is basically a wrapper around most of the functionality of - PostgreSQL's GRANT and REVOKE statements with detection of changes - (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)) -options: - database: - description: - - Name of database to connect to. - - 'Alias: I(db)' - required: yes - state: - description: - - If C(present), the specified privileges are granted, if C(absent) they - are revoked. - required: no - default: present - choices: [present, absent] - privs: - description: - - Comma separated list of privileges to grant/revoke. - - 'Alias: I(priv)' - required: no - type: - description: - - Type of database object to set privileges on. - required: no - default: table - choices: [table, sequence, function, database, - schema, language, tablespace, group] - objs: - description: - - Comma separated list of database objects to set privileges on. - - If I(type) is C(table) or C(sequence), the special value - C(ALL_IN_SCHEMA) can be provided instead to specify all database - objects of type I(type) in the schema specified via I(schema). (This - also works with PostgreSQL < 9.0.) - - If I(type) is C(database), this parameter can be omitted, in which case - privileges are set for the database specified via I(database). - - 'If I(type) is I(function), colons (":") in object names will be - replaced with commas (needed to specify function signatures, see - examples)' - - 'Alias: I(obj)' - required: no - schema: - description: - - Schema that contains the database objects specified via I(objs). - - May only be provided if I(type) is C(table), C(sequence) or - C(function). Defaults to C(public) in these cases. - required: no - roles: - description: - - Comma separated list of role (user/group) names to set permissions for. - - The special value C(PUBLIC) can be provided instead to set permissions - for the implicitly defined PUBLIC group. - - 'Alias: I(role)' - required: yes - grant_option: - description: - - Whether C(role) may grant/revoke the specified privileges/group - memberships to others. - - Set to C(no) to revoke GRANT OPTION, leave unspecified to - make no changes. - - I(grant_option) only has an effect if I(state) is C(present). - - 'Alias: I(admin_option)' - required: no - choices: ['yes', 'no'] - host: - description: - - Database host address. If unspecified, connect via Unix socket. - - 'Alias: I(login_host)' - default: null - required: no - port: - description: - - Database port to connect to. - required: no - default: 5432 - login: - description: - - The username to authenticate with. - - 'Alias: I(login_user)' - default: postgres - password: - description: - - The password to authenticate with. - - 'Alias: I(login_password))' - default: null - required: no -notes: - - Default authentication assumes that postgresql_privs is run by the - C(postgres) user on the remote host. (Ansible's C(user) or C(sudo-user)). - - This module requires Python package I(psycopg2) to be installed on the - remote host. In the default case of the remote host also being the - PostgreSQL server, PostgreSQL has to be installed there as well, obviously. - For Debian/Ubuntu-based systems, install packages I(postgresql) and - I(python-psycopg2). - - Parameters that accept comma separated lists (I(privs), I(objs), I(roles)) - have singular alias names (I(priv), I(obj), I(role)). - - To revoke only C(GRANT OPTION) for a specific object, set I(state) to - C(present) and I(grant_option) to C(no) (see examples). 
- - Note that when revoking privileges from a role R, this role may still have - access via privileges granted to any role R is a member of including - C(PUBLIC). - - Note that when revoking privileges from a role R, you do so as the user - specified via I(login). If R has been granted the same privileges by - another user also, R can still access database objects via these privileges. - - When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs). -requirements: [psycopg2] -author: Bernhard Weitzhofer -""" - -EXAMPLES = """ -# On database "library": -# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors -# TO librarian, reader WITH GRANT OPTION -- postgresql_privs: > - database=library - state=present - privs=SELECT,INSERT,UPDATE - type=table - objs=books,authors - schema=public - roles=librarian,reader - grant_option=yes - -# Same as above leveraging default values: -- postgresql_privs: > - db=library - privs=SELECT,INSERT,UPDATE - objs=books,authors - roles=librarian,reader - grant_option=yes - -# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader -# Note that role "reader" will be *granted* INSERT privilege itself if this -# isn't already the case (since state=present). -- postgresql_privs: > - db=library - state=present - priv=INSERT - obj=books - role=reader - grant_option=no - -# REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader -# "public" is the default schema. This also works for PostgreSQL 8.x. -- postgresql_privs: > - db=library - state=absent - privs=INSERT,UPDATE - objs=ALL_IN_SCHEMA - role=reader - -# GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian -- postgresql_privs: > - db=library - privs=ALL - type=schema - objs=public,math - role=librarian - -# GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader -# Note the separation of arguments with colons. -- postgresql_privs: > - db=library - privs=ALL - type=function - obj=add(int:int) - schema=math - roles=librarian,reader - -# GRANT librarian, reader TO alice, bob WITH ADMIN OPTION -# Note that group role memberships apply cluster-wide and therefore are not -# restricted to database "library" here. 
-- postgresql_privs: > - db=library - type=group - objs=librarian,reader - roles=alice,bob - admin_option=yes - -# GRANT ALL PRIVILEGES ON DATABASE library TO librarian -# Note that here "db=postgres" specifies the database to connect to, not the -# database to grant privileges on (which is specified via the "objs" param) -- postgresql_privs: > - db=postgres - privs=ALL - type=database - obj=library - role=librarian - -# GRANT ALL PRIVILEGES ON DATABASE library TO librarian -# If objs is omitted for type "database", it defaults to the database -# to which the connection is established -- postgresql_privs: > - db=library - privs=ALL - type=database - role=librarian -""" - -try: - import psycopg2 - import psycopg2.extensions -except ImportError: - psycopg2 = None - - -class Error(Exception): - pass - - -# We don't have functools.partial in Python < 2.5 -def partial(f, *args, **kwargs): - """Partial function application""" - def g(*g_args, **g_kwargs): - new_kwargs = kwargs.copy() - new_kwargs.update(g_kwargs) - return f(*(args + g_args), **g_kwargs) - g.f = f - g.args = args - g.kwargs = kwargs - return g - - -class Connection(object): - """Wrapper around a psycopg2 connection with some convenience methods""" - - def __init__(self, params): - self.database = params.database - # To use defaults values, keyword arguments must be absent, so - # check which values are empty and don't include in the **kw - # dictionary - params_map = { - "host":"host", - "login":"user", - "password":"password", - "port":"port", - "database": "database", - } - kw = dict( (params_map[k], getattr(params, k)) for k in params_map - if getattr(params, k) != '' ) - self.connection = psycopg2.connect(**kw) - self.cursor = self.connection.cursor() - - - def commit(self): - self.connection.commit() - - - def rollback(self): - self.connection.rollback() - - @property - def encoding(self): - """Connection encoding in Python-compatible form""" - return psycopg2.extensions.encodings[self.connection.encoding] - - - ### Methods for querying database objects - - # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like - # phrases in GRANT or REVOKE statements, therefore alternative methods are - # provided here. - - def schema_exists(self, schema): - query = """SELECT count(*) - FROM pg_catalog.pg_namespace WHERE nspname = %s""" - self.cursor.execute(query, (schema,)) - return self.cursor.fetchone()[0] > 0 - - - def get_all_tables_in_schema(self, schema): - if not self.schema_exists(schema): - raise Error('Schema "%s" does not exist.' % schema) - query = """SELECT relname - FROM pg_catalog.pg_class c - JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE nspname = %s AND relkind = 'r'""" - self.cursor.execute(query, (schema,)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_all_sequences_in_schema(self, schema): - if not self.schema_exists(schema): - raise Error('Schema "%s" does not exist.' % schema) - query = """SELECT relname - FROM pg_catalog.pg_class c - JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE nspname = %s AND relkind = 'S'""" - self.cursor.execute(query, (schema,)) - return [t[0] for t in self.cursor.fetchall()] - - - - ### Methods for getting access control lists and group membership info - - # To determine whether anything has changed after granting/revoking - # privileges, we compare the access control lists of the specified database - # objects before and afterwards. 
Python's list/string comparison should - # suffice for change detection, we should not actually have to parse ACLs. - # The same should apply to group membership information. - - def get_table_acls(self, schema, tables): - query = """SELECT relacl - FROM pg_catalog.pg_class c - JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE nspname = %s AND relkind = 'r' AND relname = ANY (%s) - ORDER BY relname""" - self.cursor.execute(query, (schema, tables)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_sequence_acls(self, schema, sequences): - query = """SELECT relacl - FROM pg_catalog.pg_class c - JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s) - ORDER BY relname""" - self.cursor.execute(query, (schema, sequences)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_function_acls(self, schema, function_signatures): - funcnames = [f.split('(', 1)[0] for f in function_signatures] - query = """SELECT proacl - FROM pg_catalog.pg_proc p - JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace - WHERE nspname = %s AND proname = ANY (%s) - ORDER BY proname, proargtypes""" - self.cursor.execute(query, (schema, funcnames)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_schema_acls(self, schemas): - query = """SELECT nspacl FROM pg_catalog.pg_namespace - WHERE nspname = ANY (%s) ORDER BY nspname""" - self.cursor.execute(query, (schemas,)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_language_acls(self, languages): - query = """SELECT lanacl FROM pg_catalog.pg_language - WHERE lanname = ANY (%s) ORDER BY lanname""" - self.cursor.execute(query, (languages,)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_tablespace_acls(self, tablespaces): - query = """SELECT spcacl FROM pg_catalog.pg_tablespace - WHERE spcname = ANY (%s) ORDER BY spcname""" - self.cursor.execute(query, (tablespaces,)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_database_acls(self, databases): - query = """SELECT datacl FROM pg_catalog.pg_database - WHERE datname = ANY (%s) ORDER BY datname""" - self.cursor.execute(query, (databases,)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_group_memberships(self, groups): - query = """SELECT roleid, grantor, member, admin_option - FROM pg_catalog.pg_auth_members am - JOIN pg_catalog.pg_roles r ON r.oid = am.roleid - WHERE r.rolname = ANY(%s) - ORDER BY roleid, grantor, member""" - self.cursor.execute(query, (groups,)) - return self.cursor.fetchall() - - - ### Manipulating privileges - - def manipulate_privs(self, obj_type, privs, objs, roles, - state, grant_option, schema_qualifier=None): - """Manipulate database object privileges. - - :param obj_type: Type of database object to grant/revoke - privileges for. - :param privs: Either a list of privileges to grant/revoke - or None if type is "group". - :param objs: List of database objects to grant/revoke - privileges for. - :param roles: Either a list of role names or "PUBLIC" - for the implicitly defined "PUBLIC" group - :param state: "present" to grant privileges, "absent" to revoke. - :param grant_option: Only for state "present": If True, set - grant/admin option. If False, revoke it. - If None, don't change grant option. - :param schema_qualifier: Some object types ("TABLE", "SEQUENCE", - "FUNCTION") must be qualified by schema. - Ignored for other Types. 
- """ - # get_status: function to get current status - if obj_type == 'table': - get_status = partial(self.get_table_acls, schema_qualifier) - elif obj_type == 'sequence': - get_status = partial(self.get_sequence_acls, schema_qualifier) - elif obj_type == 'function': - get_status = partial(self.get_function_acls, schema_qualifier) - elif obj_type == 'schema': - get_status = self.get_schema_acls - elif obj_type == 'language': - get_status = self.get_language_acls - elif obj_type == 'tablespace': - get_status = self.get_tablespace_acls - elif obj_type == 'database': - get_status = self.get_database_acls - elif obj_type == 'group': - get_status = self.get_group_memberships - else: - raise Error('Unsupported database object type "%s".' % obj_type) - - # Return False (nothing has changed) if there are no objs to work on. - if not objs: - return False - - # obj_ids: quoted db object identifiers (sometimes schema-qualified) - if obj_type == 'function': - obj_ids = [] - for obj in objs: - try: - f, args = obj.split('(', 1) - except: - raise Error('Illegal function signature: "%s".' % obj) - obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args)) - elif obj_type in ['table', 'sequence']: - obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs] - else: - obj_ids = ['"%s"' % o for o in objs] - - # set_what: SQL-fragment specifying what to set for the target roless: - # Either group membership or privileges on objects of a certain type. - if obj_type == 'group': - set_what = ','.join(obj_ids) - else: - set_what = '%s ON %s %s' % (','.join(privs), obj_type, - ','.join(obj_ids)) - - # for_whom: SQL-fragment specifying for whom to set the above - if roles == 'PUBLIC': - for_whom = 'PUBLIC' - else: - for_whom = ','.join(['"%s"' % r for r in roles]) - - status_before = get_status(objs) - if state == 'present': - if grant_option: - if obj_type == 'group': - query = 'GRANT %s TO %s WITH ADMIN OPTION' - else: - query = 'GRANT %s TO %s WITH GRANT OPTION' - else: - query = 'GRANT %s TO %s' - self.cursor.execute(query % (set_what, for_whom)) - - # Only revoke GRANT/ADMIN OPTION if grant_option actually is False. 
- if grant_option == False: - if obj_type == 'group': - query = 'REVOKE ADMIN OPTION FOR %s FROM %s' - else: - query = 'REVOKE GRANT OPTION FOR %s FROM %s' - self.cursor.execute(query % (set_what, for_whom)) - else: - query = 'REVOKE %s FROM %s' - self.cursor.execute(query % (set_what, for_whom)) - status_after = get_status(objs) - return status_before != status_after - - -def main(): - module = AnsibleModule( - argument_spec = dict( - database=dict(required=True, aliases=['db']), - state=dict(default='present', choices=['present', 'absent']), - privs=dict(required=False, aliases=['priv']), - type=dict(default='table', - choices=['table', - 'sequence', - 'function', - 'database', - 'schema', - 'language', - 'tablespace', - 'group']), - objs=dict(required=False, aliases=['obj']), - schema=dict(required=False), - roles=dict(required=True, aliases=['role']), - grant_option=dict(required=False, type='bool', - aliases=['admin_option']), - host=dict(default='', aliases=['login_host']), - port=dict(type='int', default=5432), - login=dict(default='postgres', aliases=['login_user']), - password=dict(default='', aliases=['login_password']) - ), - supports_check_mode = True - ) - - # Create type object as namespace for module params - p = type('Params', (), module.params) - - # param "schema": default, allowed depends on param "type" - if p.type in ['table', 'sequence', 'function']: - p.schema = p.schema or 'public' - elif p.schema: - module.fail_json(msg='Argument "schema" is not allowed ' - 'for type "%s".' % p.type) - - # param "objs": default, required depends on param "type" - if p.type == 'database': - p.objs = p.objs or p.database - elif not p.objs: - module.fail_json(msg='Argument "objs" is required ' - 'for type "%s".' % p.type) - - # param "privs": allowed, required depends on param "type" - if p.type == 'group': - if p.privs: - module.fail_json(msg='Argument "privs" is not allowed ' - 'for type "group".') - elif not p.privs: - module.fail_json(msg='Argument "privs" is required ' - 'for type "%s".' 
% p.type) - - # Connect to Database - if not psycopg2: - module.fail_json(msg='Python module "psycopg2" must be installed.') - try: - conn = Connection(p) - except psycopg2.Error, e: - module.fail_json(msg='Could not connect to database: %s' % e) - - try: - # privs - if p.privs: - privs = p.privs.split(',') - else: - privs = None - - # objs: - if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA': - objs = conn.get_all_tables_in_schema(p.schema) - elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA': - objs = conn.get_all_sequences_in_schema(p.schema) - else: - objs = p.objs.split(',') - - # function signatures are encoded using ':' to separate args - if p.type == 'function': - objs = [obj.replace(':', ',') for obj in objs] - - # roles - if p.roles == 'PUBLIC': - roles = 'PUBLIC' - else: - roles = p.roles.split(',') - - changed = conn.manipulate_privs( - obj_type = p.type, - privs = privs, - objs = objs, - roles = roles, - state = p.state, - grant_option = p.grant_option, - schema_qualifier=p.schema - ) - - except Error, e: - conn.rollback() - module.fail_json(msg=e.message) - - except psycopg2.Error, e: - conn.rollback() - # psycopg2 errors come in connection encoding, reencode - msg = e.message.decode(conn.encoding).encode(sys.getdefaultencoding(), - 'replace') - module.fail_json(msg=msg) - - if module.check_mode: - conn.rollback() - else: - conn.commit() - module.exit_json(changed=changed) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/postgresql_user b/library/database/postgresql_user deleted file mode 100644 index 8af8c45d0c5..00000000000 --- a/library/database/postgresql_user +++ /dev/null @@ -1,526 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' ---- -module: postgresql_user -short_description: Adds or removes a user (role) from a PostgreSQL database. -description: - - Add or remove PostgreSQL users (roles) from a remote host and, optionally, - grant the users access to an existing database or tables. - - The fundamental function of the module is to create, or delete, roles from - a PostgreSQL cluster. Privilege assignment, or removal, is an optional - step, which works on one database at a time. This allows the module to - be called several times in the same playbook to modify the permissions on - different databases, or to grant permissions to already existing users. - - A user cannot be removed until all the privileges have been stripped from - the user. In such a situation, if the module tries to remove the user it - will fail. To prevent this from happening, the fail_on_user option signals - the module to try to remove the user, but if that is not possible, keep going; the - module will report if changes happened and, separately, if the user was - removed or not.
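To make the parameter handling in postgresql_privs' main() above concrete, here is a minimal standalone sketch (hypothetical inputs, not part of the module) of how a privs string and a ':'-encoded function signature end up as a single GRANT statement, mirroring the obj_ids/set_what assembly in manipulate_privs:

    # Hypothetical inputs mirroring the assembly logic shown above.
    privs = "EXECUTE".split(",")               # from the "privs" parameter
    objs = ["add(int:int)".replace(":", ",")]  # ':' separates args in "objs"
    schema_qualifier = "public"

    obj_ids = []
    for obj in objs:
        f, args = obj.split("(", 1)
        obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args))

    set_what = "%s ON %s %s" % (",".join(privs), "function", ",".join(obj_ids))
    for_whom = ",".join('"%s"' % r for r in ["reader"])
    print("GRANT %s TO %s" % (set_what, for_whom))
    # GRANT EXECUTE ON function "public"."add"(int,int) TO "reader"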
-version_added: "0.6" -options: - name: - description: - - name of the user (role) to add or remove - required: true - default: null - password: - description: - - set the user's password, before 1.4 this was required. - - "When passing an encrypted password, the encrypted parameter must also be true, and it must be generated with the format C('str[\\"md5\\"] + md5[ password + username ]'), resulting in a total of 35 characters. An easy way to do this is: C(echo \\"md5`echo -n \\"verysecretpasswordJOE\\" | md5`\\")." - required: false - default: null - db: - description: - - name of database where permissions will be granted - required: false - default: null - fail_on_user: - description: - - if C(yes), fail when user can't be removed. Otherwise just log and continue - required: false - default: 'yes' - choices: [ "yes", "no" ] - port: - description: - - Database port to connect to. - required: false - default: 5432 - login_user: - description: - - User (role) used to authenticate with PostgreSQL - required: false - default: postgres - login_password: - description: - - Password used to authenticate with PostgreSQL - required: false - default: null - login_host: - description: - - Host running PostgreSQL. - required: false - default: localhost - priv: - description: - - "PostgreSQL privileges string in the format: C(table:priv1,priv2)" - required: false - default: null - role_attr_flags: - description: - - "PostgreSQL role attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER" - required: false - default: null - choices: [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB", - "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ] - state: - description: - - The user (role) state - required: false - default: present - choices: [ "present", "absent" ] - encrypted: - description: - - denotes if the password is already encrypted. boolean. - required: false - default: false - version_added: '1.4' - expires: - description: - - sets the user's password expiration. - required: false - default: null - version_added: '1.4' -notes: - - The default authentication assumes that you are either logging in as or - sudo'ing to the postgres account on the host. - - This module uses psycopg2, a Python PostgreSQL database adapter. You must - ensure that psycopg2 is installed on the host before using this module. If - the remote host is the PostgreSQL server (which is the default case), then - PostgreSQL must also be installed on the remote host. For Ubuntu-based - systems, install the postgresql, libpq-dev, and python-psycopg2 packages - on the remote host before using this module. - - If you specify PUBLIC as the user, then the privilege changes will apply - to all users. You may not specify password or role_attr_flags when the - PUBLIC user is specified. 
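The encrypted password format described above is straightforward to produce from Python as well; a minimal sketch, assuming Python 2 string semantics (as in the module itself) and hypothetical credentials:

    # "md5" + md5(password + username) in hex: 3 + 32 = 35 characters total.
    import hashlib

    def pg_md5_password(password, username):
        return "md5" + hashlib.md5(password + username).hexdigest()

    print(pg_md5_password("verysecretpassword", "JOE"))  # 35-character value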
-requirements: [ psycopg2 ] -author: Lorin Hochstein -''' - -EXAMPLES = ''' -# Create django user and grant access to database and products table -- postgresql_user: db=acme name=django password=ceec4eif7ya priv=CONNECT/products:ALL - -# Create rails user, grant privilege to create other databases and demote rails from super user status -- postgresql_user: name=rails password=secret role_attr_flags=CREATEDB,NOSUPERUSER - -# Remove test user privileges from acme -- postgresql_user: db=acme name=test priv=ALL/products:ALL state=absent fail_on_user=no - -# Remove test user from test database and the cluster -- postgresql_user: db=test name=test priv=ALL state=absent - -# Example privileges string format -INSERT,UPDATE/table:SELECT/anothertable:ALL - -# Remove an existing user's password -- postgresql_user: db=test user=test password=NULL -''' - -import re - -try: - import psycopg2 -except ImportError: - postgresqldb_found = False -else: - postgresqldb_found = True - -# =========================================== -# PostgreSQL module specific support methods. -# - - -def user_exists(cursor, user): - # The PUBLIC user is a special case that is always there - if user == 'PUBLIC': - return True - query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s" - cursor.execute(query, {'user': user}) - return cursor.rowcount > 0 - - -def user_add(cursor, user, password, role_attr_flags, encrypted, expires): - """Create a new database user (role).""" - query_password_data = dict() - query = 'CREATE USER "%(user)s"' % { "user": user} - if password is not None: - query = query + " WITH %(crypt)s" % { "crypt": encrypted } - query = query + " PASSWORD %(password)s" - query_password_data.update(password=password) - if expires is not None: - query = query + " VALID UNTIL '%(expires)s'" % { "expires": expires } - query = query + " " + role_attr_flags - cursor.execute(query, query_password_data) - return True - -def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires): - """Change user password and/or attributes. Return True if changed, False otherwise.""" - changed = False - - if user == 'PUBLIC': - if password is not None: - module.fail_json(msg="cannot change the password for PUBLIC user") - elif role_attr_flags != '': - module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user") - else: - return False - - # Handle passwords. - if password is not None or role_attr_flags is not None: - # Select password and all flag-like columns in order to verify changes. - query_password_data = dict() - select = "SELECT * FROM pg_authid where rolname=%(user)s" - cursor.execute(select, {"user": user}) - # Grab current role attributes. 
- current_role_attrs = cursor.fetchone() - - alter = 'ALTER USER "%(user)s"' % {"user": user} - if password is not None: - query_password_data.update(password=password) - alter = alter + " WITH %(crypt)s" % {"crypt": encrypted} - alter = alter + " PASSWORD %(password)s" - alter = alter + " %(flags)s" % {'flags': role_attr_flags} - elif role_attr_flags: - alter = alter + ' WITH ' + role_attr_flags - if expires is not None: - alter = alter + " VALID UNTIL '%(expires)s'" % { "expires": expires } - - try: - cursor.execute(alter, query_password_data) - except psycopg2.InternalError, e: - if e.pgcode == '25006': - # Handle errors due to read-only transactions indicated by pgcode 25006 - # ERROR: cannot execute ALTER ROLE in a read-only transaction - changed = False - module.fail_json(msg=e.pgerror) - return changed - else: - raise psycopg2.InternalError, e - - # Grab new role attributes. - cursor.execute(select, {"user": user}) - new_role_attrs = cursor.fetchone() - - # Detect any differences between current_ and new_role_attrs. - for i in range(len(current_role_attrs)): - if current_role_attrs[i] != new_role_attrs[i]: - changed = True - - return changed - -def user_delete(cursor, user): - """Try to remove a user. Returns True if successful, otherwise False""" - cursor.execute("SAVEPOINT ansible_pgsql_user_delete") - try: - cursor.execute("DROP USER \"%s\"" % user) - except: - cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete") - cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") - return False - - cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") - return True - -def has_table_privilege(cursor, user, table, priv): - query = 'SELECT has_table_privilege(%s, %s, %s)' - cursor.execute(query, (user, table, priv)) - return cursor.fetchone()[0] - -def get_table_privileges(cursor, user, table): - if '.' in table: - schema, table = table.split('.', 1) - else: - schema = 'public' - query = '''SELECT privilege_type FROM information_schema.role_table_grants - WHERE grantee=%s AND table_name=%s AND table_schema=%s''' - cursor.execute(query, (user, table, schema)) - return set([x[0] for x in cursor.fetchall()]) - - -def quote_pg_identifier(identifier): - """ - quote postgresql identifiers involving zero or more namespaces - """ - - if '"' in identifier: - # the user has supplied their own quoting. we have to hope they're - # doing it right.
Maybe they have an unfortunately named table - # containing a period in the name, such as: "public"."users.2013" - return identifier - - tokens = identifier.strip().split(".") - quoted_tokens = [] - for token in tokens: - quoted_tokens.append('"%s"' % (token, )) - return ".".join(quoted_tokens) - -def grant_table_privilege(cursor, user, table, priv): - prev_priv = get_table_privileges(cursor, user, table) - query = 'GRANT %s ON TABLE %s TO %s' % ( - priv, quote_pg_identifier(table), quote_pg_identifier(user), ) - cursor.execute(query) - curr_priv = get_table_privileges(cursor, user, table) - return len(curr_priv) > len(prev_priv) - -def revoke_table_privilege(cursor, user, table, priv): - prev_priv = get_table_privileges(cursor, user, table) - query = 'REVOKE %s ON TABLE %s FROM %s' % ( - priv, quote_pg_identifier(table), quote_pg_identifier(user), ) - cursor.execute(query) - curr_priv = get_table_privileges(cursor, user, table) - return len(curr_priv) < len(prev_priv) - - -def get_database_privileges(cursor, user, db): - priv_map = { - 'C':'CREATE', - 'T':'TEMPORARY', - 'c':'CONNECT', - } - query = 'SELECT datacl FROM pg_database WHERE datname = %s' - cursor.execute(query, (db,)) - datacl = cursor.fetchone()[0] - if datacl is None: - return [] - r = re.search('%s=(C?T?c?)/[a-z]+\,?' % user, datacl) - if r is None: - return [] - o = [] - for v in r.group(1): - o.append(priv_map[v]) - return o - -def has_database_privilege(cursor, user, db, priv): - query = 'SELECT has_database_privilege(%s, %s, %s)' - cursor.execute(query, (user, db, priv)) - return cursor.fetchone()[0] - -def grant_database_privilege(cursor, user, db, priv): - prev_priv = get_database_privileges(cursor, user, db) - if user == "PUBLIC": - query = 'GRANT %s ON DATABASE \"%s\" TO PUBLIC' % (priv, db) - else: - query = 'GRANT %s ON DATABASE \"%s\" TO \"%s\"' % (priv, db, user) - cursor.execute(query) - curr_priv = get_database_privileges(cursor, user, db) - return len(curr_priv) > len(prev_priv) - -def revoke_database_privilege(cursor, user, db, priv): - prev_priv = get_database_privileges(cursor, user, db) - if user == "PUBLIC": - query = 'REVOKE %s ON DATABASE \"%s\" FROM PUBLIC' % (priv, db) - else: - query = 'REVOKE %s ON DATABASE \"%s\" FROM \"%s\"' % (priv, db, user) - cursor.execute(query) - curr_priv = get_database_privileges(cursor, user, db) - return len(curr_priv) < len(prev_priv) - -def revoke_privileges(cursor, user, privs): - if privs is None: - return False - - changed = False - for type_ in privs: - revoke_func = { - 'table':revoke_table_privilege, - 'database':revoke_database_privilege - }[type_] - for name, privileges in privs[type_].iteritems(): - for privilege in privileges: - changed = revoke_func(cursor, user, name, privilege)\ - or changed - - return changed - -def grant_privileges(cursor, user, privs): - if privs is None: - return False - - changed = False - for type_ in privs: - grant_func = { - 'table':grant_table_privilege, - 'database':grant_database_privilege - }[type_] - for name, privileges in privs[type_].iteritems(): - for privilege in privileges: - changed = grant_func(cursor, user, name, privilege)\ - or changed - - return changed - -def parse_role_attrs(role_attr_flags): - """ - Parse role attributes string for user creation. - Format: - - attributes[,attributes,...] - - Where: - - attributes := CREATEDB,CREATEROLE,NOSUPERUSER,... 
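Stepping back to get_database_privileges above: its regular expression extracts one user's flag letters from the raw datacl value and maps them to privilege names. A worked example with a hypothetical ACL string:

    # Hypothetical datacl value; 'joe' holds CREATE, TEMPORARY and CONNECT.
    import re

    priv_map = {'C': 'CREATE', 'T': 'TEMPORARY', 'c': 'CONNECT'}
    datacl = '{=Tc/postgres,joe=CTc/postgres}'
    r = re.search('%s=(C?T?c?)/[a-z]+\,?' % 'joe', datacl)
    print([priv_map[v] for v in r.group(1)])
    # ['CREATE', 'TEMPORARY', 'CONNECT']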
- """ - if ',' not in role_attr_flags: - return role_attr_flags - flag_set = role_attr_flags.split(",") - o_flags = " ".join(flag_set) - return o_flags - -def parse_privs(privs, db): - """ - Parse privilege string to determine permissions for database db. - Format: - - privileges[/privileges/...] - - Where: - - privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] | - TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...] - """ - if privs is None: - return privs - - o_privs = { - 'database':{}, - 'table':{} - } - for token in privs.split('/'): - if ':' not in token: - type_ = 'database' - name = db - priv_set = set(x.strip() for x in token.split(',')) - else: - type_ = 'table' - name, privileges = token.split(':', 1) - priv_set = set(x.strip() for x in privileges.split(',')) - - o_privs[type_][name] = priv_set - - return o_privs - -# =========================================== -# Module execution. -# - -def main(): - module = AnsibleModule( - argument_spec=dict( - login_user=dict(default="postgres"), - login_password=dict(default=""), - login_host=dict(default=""), - user=dict(required=True, aliases=['name']), - password=dict(default=None), - state=dict(default="present", choices=["absent", "present"]), - priv=dict(default=None), - db=dict(default=''), - port=dict(default='5432'), - fail_on_user=dict(type='bool', default='yes'), - role_attr_flags=dict(default=''), - encrypted=dict(type='bool', default='no'), - expires=dict(default=None) - ), - supports_check_mode = True - ) - - user = module.params["user"] - password = module.params["password"] - state = module.params["state"] - fail_on_user = module.params["fail_on_user"] - db = module.params["db"] - if db == '' and module.params["priv"] is not None: - module.fail_json(msg="privileges require a database to be specified") - privs = parse_privs(module.params["priv"], db) - port = module.params["port"] - role_attr_flags = parse_role_attrs(module.params["role_attr_flags"]) - if module.params["encrypted"]: - encrypted = "ENCRYPTED" - else: - encrypted = "UNENCRYPTED" - expires = module.params["expires"] - - if not postgresqldb_found: - module.fail_json(msg="the python psycopg2 module is required") - - # To use defaults values, keyword arguments must be absent, so - # check which values are empty and don't include in the **kw - # dictionary - params_map = { - "login_host":"host", - "login_user":"user", - "login_password":"password", - "port":"port", - "db":"database" - } - kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() - if k in params_map and v != "" ) - try: - db_connection = psycopg2.connect(**kw) - cursor = db_connection.cursor() - except Exception, e: - module.fail_json(msg="unable to connect to database: %s" % e) - - kw = dict(user=user) - changed = False - user_removed = False - - if state == "present": - if user_exists(cursor, user): - changed = user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires) - else: - changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires) - changed = grant_privileges(cursor, user, privs) or changed - else: - if user_exists(cursor, user): - if module.check_mode: - changed = True - kw['user_removed'] = True - else: - changed = revoke_privileges(cursor, user, privs) - user_removed = user_delete(cursor, user) - changed = changed or user_removed - if fail_on_user and not user_removed: - msg = "unable to remove user" - module.fail_json(msg=msg) - kw['user_removed'] = user_removed - - if changed: - if module.check_mode: - 
db_connection.rollback() - else: - db_connection.commit() - - kw['changed'] = changed - module.exit_json(**kw) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/redis b/library/database/redis deleted file mode 100644 index eb9654631e7..00000000000 --- a/library/database/redis +++ /dev/null @@ -1,329 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: redis -short_description: Various redis commands, slave and flush -description: - - Unified utility to interact with redis instances. - 'slave' sets a redis instance in slave or master mode. - 'flush' flushes all the instance or a specified db. - 'config' (new in 1.6), ensures a configuration setting on an instance. -version_added: "1.3" -options: - command: - description: - - The selected redis command - required: true - default: null - choices: [ "slave", "flush", "config" ] - login_password: - description: - - The password used to authenticate with (usually not used) - required: false - default: null - login_host: - description: - - The host running the database - required: false - default: localhost - login_port: - description: - - The port to connect to - required: false - default: 6379 - master_host: - description: - - The host of the master instance [slave command] - required: false - default: null - master_port: - description: - - The port of the master instance [slave command] - required: false - default: null - slave_mode: - description: - - the mode of the redis instance [slave command] - required: false - default: slave - choices: [ "master", "slave" ] - db: - description: - - The database to flush (used in db mode) [flush command] - required: false - default: null - flush_mode: - description: - - Type of flush (all the dbs in a redis instance or a specific one) - [flush command] - required: false - default: all - choices: [ "all", "db" ] - name: - version_added: 1.6 - description: - - A redis config key. - required: false - default: null - value: - version_added: 1.6 - description: - - A redis config value. - required: false - default: null - - -notes: - - Requires the redis-py Python package on the remote host. You can - install it with pip (pip install redis) or with a package manager. 
- https://github.com/andymccurdy/redis-py - - If the redis master instance we are making slave of is password protected - this needs to be in the redis.conf in the masterauth variable - -requirements: [ redis ] -author: Xabier Larrakoetxea -''' - -EXAMPLES = ''' -# Set local redis instance to be slave of melee.island on port 6377 -- redis: command=slave master_host=melee.island master_port=6377 - -# Deactivate slave mode -- redis: command=slave slave_mode=master - -# Flush all the redis db -- redis: command=flush flush_mode=all - -# Flush only one db in a redis instance -- redis: command=flush db=1 flush_mode=db - -# Configure local redis to have 10000 max clients -- redis: command=config name=maxclients value=10000 - -# Configure local redis to have lua time limit of 100 ms -- redis: command=config name=lua-time-limit value=100 -''' - -try: - import redis -except ImportError: - redis_found = False -else: - redis_found = True - - -# =========================================== -# Redis module specific support methods. -# - -def set_slave_mode(client, master_host, master_port): - try: - return client.slaveof(master_host, master_port) - except Exception: - return False - - -def set_master_mode(client): - try: - return client.slaveof() - except Exception: - return False - - -def flush(client, db=None): - try: - if type(db) != int: - return client.flushall() - else: - # The passed client has been connected to the database already - return client.flushdb() - except Exception: - return False - - -# =========================================== -# Module execution. -# - -def main(): - module = AnsibleModule( - argument_spec = dict( - command=dict(default=None, choices=['slave', 'flush', 'config']), - login_password=dict(default=None), - login_host=dict(default='localhost'), - login_port=dict(default='6379'), - master_host=dict(default=None), - master_port=dict(default=None), - slave_mode=dict(default='slave', choices=['master', 'slave']), - db=dict(default=None), - flush_mode=dict(default='all', choices=['all', 'db']), - name=dict(default=None), - value=dict(default=None) - ), - supports_check_mode = True - ) - - if not redis_found: - module.fail_json(msg="python redis module is required") - - login_password = module.params['login_password'] - login_host = module.params['login_host'] - login_port = int(module.params['login_port']) - command = module.params['command'] - - # Slave Command section ----------- - if command == "slave": - master_host = module.params['master_host'] - master_port = module.params['master_port'] - try: - master_port = int(module.params['master_port']) - except Exception: - pass - mode = module.params['slave_mode'] - - #Check if we have all the data - if mode == "slave": # Only need data if we want to be slave - if not master_host: - module.fail_json( - msg='In slave mode master host must be provided') - - if not master_port: - module.fail_json( - msg='In slave mode master port must be provided') - - #Connect and check - r = redis.StrictRedis(host=login_host, - port=login_port, - password=login_password) - try: - r.ping() - except Exception, e: - module.fail_json(msg="unable to connect to database: %s" % e) - - #Check if we are already in the mode that we want - info = r.info() - if mode == "master" and info["role"] == "master": - module.exit_json(changed=False, mode=mode) - - elif mode == "slave" and\ - info["role"] == "slave" and\ - info["master_host"] == master_host and\ - info["master_port"] == master_port: - status = { - 'status': mode, - 'master_host': master_host, - 
'master_port': master_port, - } - module.exit_json(changed=False, mode=status) - else: - # Do the stuff - # (Check Check_mode before commands so the commands aren't evaluated - # if not necessary) - if mode == "slave": - if module.check_mode or\ - set_slave_mode(r, master_host, master_port): - info = r.info() - status = { - 'status': mode, - 'master_host': master_host, - 'master_port': master_port, - } - module.exit_json(changed=True, mode=status) - else: - module.fail_json(msg='Unable to set slave mode') - - else: - if module.check_mode or set_master_mode(r): - module.exit_json(changed=True, mode=mode) - else: - module.fail_json(msg='Unable to set master mode') - - # flush Command section ----------- - elif command == "flush": - try: - db = int(module.params['db']) - except Exception: - db = 0 - mode = module.params['flush_mode'] - - #Check if we have all the data - if mode == "db": - if type(db) != int: - module.fail_json( - msg="In db mode the db number must be provided") - - #Connect and check - r = redis.StrictRedis(host=login_host, - port=login_port, - password=login_password, - db=db) - try: - r.ping() - except Exception, e: - module.fail_json(msg="unable to connect to database: %s" % e) - - # Do the stuff - # (Check Check_mode before commands so the commands aren't evaluated - # if not necessary) - if mode == "all": - if module.check_mode or flush(r): - module.exit_json(changed=True, flushed=True) - else: # Flush never fails :) - module.fail_json(msg="Unable to flush all databases") - - else: - if module.check_mode or flush(r, db): - module.exit_json(changed=True, flushed=True, db=db) - else: # Flush never fails :) - module.fail_json(msg="Unable to flush '%d' database" % db) - elif command == 'config': - name = module.params['name'] - value = module.params['value'] - - r = redis.StrictRedis(host=login_host, - port=login_port, - password=login_password) - - try: - r.ping() - except Exception, e: - module.fail_json(msg="unable to connect to database: %s" % e) - - - try: - old_value = r.config_get(name)[name] - except Exception, e: - module.fail_json(msg="unable to read config: %s" % e) - changed = old_value != value - - if module.check_mode or not changed: - module.exit_json(changed=changed, name=name, value=value) - else: - try: - r.config_set(name, value) - except Exception, e: - module.fail_json(msg="unable to write config: %s" % e) - module.exit_json(changed=changed, name=name, value=value) - else: - module.fail_json(msg='A valid command must be provided') - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/riak b/library/database/riak deleted file mode 100644 index b30e7dc485d..00000000000 --- a/library/database/riak +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, James Martin , Drew Kerrigan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
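For reference, the role check that the redis module's slave command performs above can be reproduced directly with redis-py; a minimal sketch against a hypothetical local instance:

    # Assumes a reachable local redis; host and port are hypothetical.
    import redis

    r = redis.StrictRedis(host='localhost', port=6379)
    r.ping()                  # raises on connection failure
    info = r.info()
    if info['role'] == 'master':
        print('already a master, nothing to change')
    else:
        r.slaveof()           # no arguments: promote this instance to master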
-# -DOCUMENTATION = ''' ---- -module: riak -short_description: This module handles some common Riak operations -description: - - This module can be used to join nodes to a cluster or check - the status of the cluster. -version_added: "1.2" -options: - command: - description: - - The command you would like to perform against the cluster. - required: false - default: null - aliases: [] - choices: ['ping', 'kv_test', 'join', 'plan', 'commit'] - config_dir: - description: - - The path to the riak configuration directory - required: false - default: /etc/riak - aliases: [] - http_conn: - description: - - The ip address and port that is listening for Riak HTTP queries - required: false - default: 127.0.0.1:8098 - aliases: [] - target_node: - description: - - The target node for certain operations (join, ping) - required: false - default: riak@127.0.0.1 - aliases: [] - wait_for_handoffs: - description: - - Number of seconds to wait for handoffs to complete. - required: false - default: null - aliases: [] - type: 'int' - wait_for_ring: - description: - - Number of seconds to wait for all nodes to agree on the ring. - required: false - default: null - aliases: [] - type: 'int' - wait_for_service: - description: - - Waits for a riak service to come online before continuing. - required: false - default: None - aliases: [] - choices: ['kv'] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 -''' - -EXAMPLES = ''' -# Joins a Riak node to another node -- riak: command=join target_node=riak@10.1.1.1 - -# Wait for handoffs to finish. Use with async and poll. -- riak: wait_for_handoffs=yes - -# Wait for riak_kv service to start up -- riak: wait_for_service=kv -''' - -import urllib2 -import time -import socket -import sys -try: - import json -except ImportError: - import simplejson as json - - -def ring_check(module, riak_admin_bin): - cmd = '%s ringready' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0 and 'TRUE All nodes agree on the ring' in out: - return True - else: - return False - -def main(): - - module = AnsibleModule( - argument_spec=dict( - command=dict(required=False, default=None, choices=[ - 'ping', 'kv_test', 'join', 'plan', 'commit']), - config_dir=dict(default='/etc/riak'), - http_conn=dict(required=False, default='127.0.0.1:8098'), - target_node=dict(default='riak@127.0.0.1', required=False), - wait_for_handoffs=dict(default=False, type='int'), - wait_for_ring=dict(default=False, type='int'), - wait_for_service=dict( - required=False, default=None, choices=['kv']), - validate_certs = dict(default='yes', type='bool')) - ) - - - command = module.params.get('command') - config_dir = module.params.get('config_dir') - http_conn = module.params.get('http_conn') - target_node = module.params.get('target_node') - wait_for_handoffs = module.params.get('wait_for_handoffs') - wait_for_ring = module.params.get('wait_for_ring') - wait_for_service = module.params.get('wait_for_service') - validate_certs = module.params.get('validate_certs') - - - #make sure riak commands are on the path - riak_bin = module.get_bin_path('riak') - riak_admin_bin = module.get_bin_path('riak-admin') - - timeout = time.time() + 120 - while True: - if time.time() > timeout: - module.fail_json(msg='Timeout, could not fetch Riak stats.') - (response, info) = fetch_url(module, 'http://%s/stats' %
(http_conn), force=True, timeout=5) - if info['status'] == 200: - stats_raw = response.read() - break - time.sleep(5) - - # here we attempt to load those stats, - try: - stats = json.loads(stats_raw) - except: - module.fail_json(msg='Could not parse Riak stats.') - - node_name = stats['nodename'] - nodes = stats['ring_members'] - ring_size = stats['ring_creation_size'] - rc, out, err = module.run_command([riak_bin, 'version'] ) - version = out.strip() - - result = dict(node_name=node_name, - nodes=nodes, - ring_size=ring_size, - version=version) - - if command == 'ping': - cmd = '%s ping %s' % ( riak_bin, target_node ) - rc, out, err = module.run_command(cmd) - if rc == 0: - result['ping'] = out - else: - module.fail_json(msg=out) - - elif command == 'kv_test': - cmd = '%s test' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0: - result['kv_test'] = out - else: - module.fail_json(msg=out) - - elif command == 'join': - if nodes.count(node_name) == 1 and len(nodes) > 1: - result['join'] = 'Node is already in cluster or staged to be in cluster.' - else: - cmd = '%s cluster join %s' % (riak_admin_bin, target_node) - rc, out, err = module.run_command(cmd) - if rc == 0: - result['join'] = out - result['changed'] = True - else: - module.fail_json(msg=out) - - elif command == 'plan': - cmd = '%s cluster plan' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0: - result['plan'] = out - if 'Staged Changes' in out: - result['changed'] = True - else: - module.fail_json(msg=out) - - elif command == 'commit': - cmd = '%s cluster commit' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0: - result['commit'] = out - result['changed'] = True - else: - module.fail_json(msg=out) - -# this could take a while, recommend to run in async mode - if wait_for_handoffs: - timeout = time.time() + wait_for_handoffs - while True: - cmd = '%s transfers' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if 'No transfers active' in out: - result['handoffs'] = 'No transfers active.' - break - time.sleep(10) - if time.time() > timeout: - module.fail_json(msg='Timeout waiting for handoffs.') - - if wait_for_service: - cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name ] - rc, out, err = module.run_command(cmd) - result['service'] = out - - if wait_for_ring: - timeout = time.time() + wait_for_ring - while True: - if ring_check(module, riak_admin_bin): - break - time.sleep(10) - if time.time() > timeout: - module.fail_json(msg='Timeout waiting for nodes to agree on ring.') - - result['ring_ready'] = ring_check(module, riak_admin_bin) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/files/acl b/library/files/acl deleted file mode 100644 index 30c533e006c..00000000000 --- a/library/files/acl +++ /dev/null @@ -1,295 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' ---- -module: acl -version_added: "1.4" -short_description: Sets and retrieves file ACL information. -description: - - Sets and retrieves file ACL information. -options: - name: - required: true - default: null - description: - - The full path of the file or object. - aliases: ['path'] - - state: - required: false - default: query - choices: [ 'query', 'present', 'absent' ] - description: - - defines whether the ACL should be present or not. The C(query) state gets the current acl without changing it, for use in 'register' operations. - - follow: - required: false - default: yes - choices: [ 'yes', 'no' ] - description: - - whether to follow symlinks on the path if a symlink is encountered. - - default: - version_added: "1.5" - required: false - default: no - choices: [ 'yes', 'no' ] - description: - - if the target is a directory, setting this to yes will make it the default acl for entities created inside the directory. It causes an error if name is a file. - - entity: - version_added: "1.5" - required: false - description: - - actual user or group that the ACL applies to when matching entity types user or group are selected. - - etype: - version_added: "1.5" - required: false - default: null - choices: [ 'user', 'group', 'mask', 'other' ] - description: - - the entity type of the ACL to apply, see setfacl documentation for more info. - - - permissions: - version_added: "1.5" - required: false - default: null - description: - - Permissions to apply/remove can be any combination of r, w and x (read, write and execute respectively) - - entry: - required: false - default: null - description: - - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '<etype>:<qualifier>:<perms>'. The qualifier may be empty for some types, but the type and perms are always required. '-' can be used as a placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields. - -author: Brian Coca -notes: - - The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed. -''' - -EXAMPLES = ''' -# Grant user Joe read access to a file -- acl: name=/etc/foo.conf entity=joe etype=user permissions="r" state=present - -# Removes the acl for Joe on a specific file -- acl: name=/etc/foo.conf entity=joe etype=user state=absent - -# Sets default acl for joe on foo.d -- acl: name=/etc/foo.d entity=joe etype=user permissions=rw default=yes state=present - -# Same as previous but using entry shorthand -- acl: name=/etc/foo.d entry="default:user:joe:rw-" state=present - -# Obtain the acl for a specific file -- acl: name=/etc/foo.conf - register: acl_info -''' - -def normalize_permissions(p): - perms = ['-','-','-'] - for char in p: - if char == 'r': - perms[0] = 'r' - if char == 'w': - perms[1] = 'w' - if char == 'x': - perms[2] = 'x' - return ''.join(perms) - -def split_entry(entry): - ''' splits entry and ensures normalized return''' - - a = entry.split(':') - a.reverse() - if len(a) == 3: - a.append(False) - try: - p,e,t,d = a - except ValueError, e: - print "wtf?? %s => %s" % (entry,a) - raise e - - if d: - d = True - - if t.startswith("u"): - t = "user" - elif t.startswith("g"): - t = "group" - elif t.startswith("m"): - t = "mask" - elif t.startswith("o"): - t = "other" - else: - t = None - - p = normalize_permissions(p) - - return [d,t,e,p] - -def get_acls(module,path,follow): - - cmd = [ module.get_bin_path('getfacl', True) ] - if not follow: - cmd.append('-h') - # prevents absolute path warnings and removes headers - cmd.append('--omit-header') - cmd.append('--absolute-names') - cmd.append(path) - - return _run_acl(module,cmd) - -def set_acl(module,path,entry,follow,default): - - cmd = [ module.get_bin_path('setfacl', True) ] - if not follow: - cmd.append('-h') - if default: - cmd.append('-d') - cmd.append('-m "%s"' % entry) - cmd.append(path) - - return _run_acl(module,cmd) - -def rm_acl(module,path,entry,follow,default): - - cmd = [ module.get_bin_path('setfacl', True) ] - if not follow: - cmd.append('-h') - if default: - cmd.append('-k') - entry = entry[0:entry.rfind(':')] - cmd.append('-x "%s"' % entry) - cmd.append(path) - - return _run_acl(module,cmd,False) - -def _run_acl(module,cmd,check_rc=True): - - try: - (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc) - except Exception, e: - module.fail_json(msg=e.strerror) - - # trim last line as it is always empty - ret = out.splitlines() - return ret[0:len(ret)-1] - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True,aliases=['path'], type='str'), - entry = dict(required=False, type='str'), - entity = dict(required=False, type='str', default=''), - etype = dict(required=False, choices=['other', 'user', 'group', 'mask'], type='str'), - permissions = dict(required=False, type='str'), - state = dict(required=False, default='query', choices=[ 'query', 'present', 'absent' ], type='str'), - follow = dict(required=False, type='bool', default=True), - default= dict(required=False, type='bool', default=False), - ), - supports_check_mode=True, - ) - - path = os.path.expanduser(module.params.get('name')) - entry = module.params.get('entry') - entity = module.params.get('entity') - etype = module.params.get('etype') - permissions = module.params.get('permissions') - state = module.params.get('state') - follow = module.params.get('follow') - default = module.params.get('default') - - if permissions: - permissions = normalize_permissions(permissions) - - if not os.path.exists(path): - module.fail_json(msg="path not found or not accessible!") - - if state in ['present','absent']: - if not entry and not etype: - module.fail_json(msg="%s requires either etype and permissions or just entry be set" % state) - - if entry: - if etype or entity or permissions: - module.fail_json(msg="entry and another incompatible field (entity, etype or permissions) are also set") - if entry.count(":") not in [2,3]: - module.fail_json(msg="Invalid entry: '%s', it requires 3 or 4 sections divided by ':'" % entry) - - default, etype, entity, permissions = split_entry(entry) - - changed=False - msg = "" - currentacls = get_acls(module,path,follow) - - if (state == 'present'): - matched = False - for oldentry in currentacls: - if oldentry.count(":") == 0: - continue - old_default, old_type, old_entity, old_permissions = split_entry(oldentry) - if old_default == default: - if old_type == etype: - if etype in ['user', 'group']: - if old_entity == entity: - matched = True - if not old_permissions == permissions: - changed = True - break - else: - matched = True - if not
old_permissions == permissions: - changed = True - break - if not matched: - changed=True - - if changed and not module.check_mode: - set_acl(module,path,':'.join([etype, str(entity), permissions]),follow,default) - msg="%s is present" % ':'.join([etype, str(entity), permissions]) - - elif state == 'absent': - for oldentry in currentacls: - if oldentry.count(":") == 0: - continue - old_default, old_type, old_entity, old_permissions = split_entry(oldentry) - if old_default == default: - if old_type == etype: - if etype in ['user', 'group']: - if old_entity == entity: - changed=True - break - else: - changed=True - break - if changed and not module.check_mode: - rm_acl(module,path,':'.join([etype, entity, '---']),follow,default) - msg="%s is absent" % ':'.join([etype, entity, '---']) - else: - msg="current acl" - - if changed: - currentacls = get_acls(module,path,follow) - - module.exit_json(changed=changed, msg=msg, acl=currentacls) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/files/assemble b/library/files/assemble deleted file mode 100644 index a16431b9f52..00000000000 --- a/library/files/assemble +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Stephen Fromm -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import os -import os.path -import shutil -import tempfile -import re - -DOCUMENTATION = ''' ---- -module: assemble -short_description: Assembles a configuration file from fragments -description: - - Assembles a configuration file from fragments. Often a particular - program will take a single configuration file and does not support a - C(conf.d) style structure where it is easy to build up the configuration - from multiple sources. M(assemble) will take a directory of files that can be - local or have already been transferred to the system, and concatenate them - together to produce a destination file. Files are assembled in string sorting order. - Puppet calls this idea I(fragments). -version_added: "0.5" -options: - src: - description: - - An already existing directory full of source files. - required: true - default: null - aliases: [] - dest: - description: - - A file to create using the concatenation of all of the source files. - required: true - default: null - backup: - description: - - Create a backup file (if C(yes)), including the timestamp information so - you can get the original file back if you somehow clobbered it - incorrectly. - required: false - choices: [ "yes", "no" ] - default: "no" - delimiter: - description: - - A delimiter to separate the file contents. - version_added: "1.4" - required: false - default: null - remote_src: - description: - - If False, it will search for src at originating/master machine, if True it will - go to the remote/target machine for the src. Default is True. 
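The permission normalization the acl module applies above canonicalizes any combination of r, w and x into fixed rwx slots; a self-contained restatement with hypothetical inputs:

    # Same normalization idea as the acl module's helper above.
    def normalize_permissions(p):
        perms = ['-', '-', '-']
        for char in p:
            if char == 'r':
                perms[0] = 'r'
            if char == 'w':
                perms[1] = 'w'
            if char == 'x':
                perms[2] = 'x'
        return ''.join(perms)

    print(normalize_permissions('xr'))  # 'r-x' -- order is canonicalized
    print(normalize_permissions('rw'))  # 'rw-'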
- choices: [ "True", "False" ] - required: false - default: "True" - version_added: "1.4" - regexp: - description: - - Assemble files only if C(regex) matches the filename. If not set, - all files are assembled. All "\\" (backslash) must be escaped as - "\\\\" to comply yaml syntax. Uses Python regular expressions; see - U(http://docs.python.org/2/library/re.html). - required: false - default: null -author: Stephen Fromm -extends_documentation_fragment: files -''' - -EXAMPLES = ''' -# Example from Ansible Playbooks -- assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf - -# When a delimiter is specified, it will be inserted in between each fragment -- assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf delimiter='### START FRAGMENT ###' -''' - -# =========================================== -# Support method - -def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None): - ''' assemble a file from a directory of fragments ''' - tmpfd, temp_path = tempfile.mkstemp() - tmp = os.fdopen(tmpfd,'w') - delimit_me = False - add_newline = False - - for f in sorted(os.listdir(src_path)): - if compiled_regexp and not compiled_regexp.search(f): - continue - fragment = "%s/%s" % (src_path, f) - if not os.path.isfile(fragment): - continue - fragment_content = file(fragment).read() - - # always put a newline between fragments if the previous fragment didn't end with a newline. - if add_newline: - tmp.write('\n') - - # delimiters should only appear between fragments - if delimit_me: - if delimiter: - # un-escape anything like newlines - delimiter = delimiter.decode('unicode-escape') - tmp.write(delimiter) - # always make sure there's a newline after the - # delimiter, so lines don't run together - if delimiter[-1] != '\n': - tmp.write('\n') - - tmp.write(fragment_content) - delimit_me = True - if fragment_content.endswith('\n'): - add_newline = False - else: - add_newline = True - - tmp.close() - return temp_path - -# ============================================================== -# main - -def main(): - - module = AnsibleModule( - # not checking because of daisy chain to file module - argument_spec = dict( - src = dict(required=True), - delimiter = dict(required=False), - dest = dict(required=True), - backup=dict(default=False, type='bool'), - remote_src=dict(default=False, type='bool'), - regexp = dict(required=False), - ), - add_file_common_args=True - ) - - changed = False - pathmd5 = None - destmd5 = None - src = os.path.expanduser(module.params['src']) - dest = os.path.expanduser(module.params['dest']) - backup = module.params['backup'] - delimiter = module.params['delimiter'] - regexp = module.params['regexp'] - compiled_regexp = None - - if not os.path.exists(src): - module.fail_json(msg="Source (%s) does not exist" % src) - - if not os.path.isdir(src): - module.fail_json(msg="Source (%s) is not a directory" % src) - - if regexp != None: - try: - compiled_regexp = re.compile(regexp) - except re.error, e: - module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp)) - - path = assemble_from_fragments(src, delimiter, compiled_regexp) - pathmd5 = module.md5(path) - - if os.path.exists(dest): - destmd5 = module.md5(dest) - - if pathmd5 != destmd5: - if backup and destmd5 is not None: - module.backup_local(dest) - shutil.copy(path, dest) - changed = True - - os.remove(path) - - file_args = module.load_file_common_arguments(module.params) - changed = module.set_fs_attributes_if_different(file_args, changed) - # Mission complete - 
module.exit_json(src=src, dest=dest, md5sum=pathmd5, changed=changed, msg="OK") - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/files/copy b/library/files/copy deleted file mode 100644 index 5aef3d72221..00000000000 --- a/library/files/copy +++ /dev/null @@ -1,251 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -import os -import time - -DOCUMENTATION = ''' ---- -module: copy -version_added: "historical" -short_description: Copies files to remote locations. -description: - - The M(copy) module copies a file on the local box to remote locations. -options: - src: - description: - - Local path to a file to copy to the remote server; can be absolute or relative. - If path is a directory, it is copied recursively. In this case, if path ends - with "/", only the contents of that directory are copied to the destination. - Otherwise, if it does not end with "/", the directory itself with all contents - is copied. This behavior is similar to Rsync. - required: false - default: null - aliases: [] - content: - version_added: "1.1" - description: - - When used instead of 'src', sets the contents of a file directly to the specified value. - required: false - default: null - dest: - description: - - Remote absolute path where the file should be copied to. If src is a directory, - this must be a directory too. - required: true - default: null - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - version_added: "0.7" - required: false - choices: [ "yes", "no" ] - default: "no" - force: - description: - - the default is C(yes), which will replace the remote file when contents - are different than the source. If C(no), the file will only be transferred - if the destination does not exist. - version_added: "1.1" - required: false - choices: [ "yes", "no" ] - default: "yes" - aliases: [ "thirsty" ] - validate: - description: - - The validation command to run before copying into place. The path to the file to - validate is passed in via '%s' which must be present as in the visudo example below. - The command is passed securely so shell features like expansion and pipes won't work. - required: false - default: "" - version_added: "1.2" - directory_mode: - description: - - When doing a recursive copy set the mode for the directories. If this is not set we will use the system - defaults. The mode is only set on directories which are newly created, and will not affect those that - already existed. - required: false - version_added: "1.5" -extends_documentation_fragment: files -author: Michael DeHaan -notes: - - The "copy" module's recursive copy facility does not scale to lots (>hundreds) of files. - As an alternative, see the synchronize module, which is a wrapper around rsync.
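The force behaviour documented above boils down to a checksum comparison before transfer; a minimal sketch of that check with hypothetical paths (the module itself uses module.md5()):

    # Copy is only considered changed when source and destination differ.
    import hashlib

    def md5sum(path):
        return hashlib.md5(open(path, 'rb').read()).hexdigest()

    changed = md5sum('/srv/myfiles/foo.conf') != md5sum('/etc/foo.conf')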
-''' - -EXAMPLES = ''' -# Example from Ansible Playbooks -- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode=0644 - -# The same example as above, but using a symbolic mode equivalent to 0644 -- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode="u=rw,g=r,o=r" - -# Another symbolic mode example, adding some permissions and removing others -- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode="u+rw,g-wx,o-rwx" - -# Copy a new "ntp.conf" file into place, backing up the original if it differs from the copied version -- copy: src=/mine/ntp.conf dest=/etc/ntp.conf owner=root group=root mode=644 backup=yes - -# Copy a new "sudoers" file into place, after passing validation with visudo -- copy: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s' -''' - - -def split_pre_existing_dir(dirname): - ''' - Return the first pre-existing directory and a list of the new directories that will be created. - ''' - - head, tail = os.path.split(dirname) - if not os.path.exists(head): - (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head) - else: - return (head, [ tail ]) - new_directory_list.append(tail) - return (pre_existing_dir, new_directory_list) - - -def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed): - ''' - Walk the new directories list and make sure that permissions are as we would expect - ''' - - if len(new_directory_list) > 0: - working_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0)) - directory_args['path'] = working_dir - changed = module.set_fs_attributes_if_different(directory_args, changed) - changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed) - return changed - - -def main(): - - module = AnsibleModule( - # not checking because of daisy chain to file module - argument_spec = dict( - src = dict(required=False), - original_basename = dict(required=False), # used to handle 'dest is a directory' via template, a slight hack - content = dict(required=False, no_log=True), - dest = dict(required=True), - backup = dict(default=False, type='bool'), - force = dict(default=True, aliases=['thirsty'], type='bool'), - validate = dict(required=False, type='str'), - directory_mode = dict(required=False) - ), - add_file_common_args=True, - supports_check_mode=True, - ) - - src = os.path.expanduser(module.params['src']) - dest = os.path.expanduser(module.params['dest']) - backup = module.params['backup'] - force = module.params['force'] - original_basename = module.params.get('original_basename',None) - validate = module.params.get('validate',None) - - if not os.path.exists(src): - module.fail_json(msg="Source %s failed to transfer" % (src)) - if not os.access(src, os.R_OK): - module.fail_json(msg="Source %s not readable" % (src)) - - md5sum_src = module.md5(src) - md5sum_dest = None - - changed = False - - # Special handling for recursive copy - create intermediate dirs - if original_basename and dest.endswith("/"): - dest = os.path.join(dest, original_basename) - dirname = os.path.dirname(dest) - if not os.path.exists(dirname): - (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname) - os.makedirs(dirname) - directory_args = module.load_file_common_arguments(module.params) - directory_mode = module.params["directory_mode"] - if directory_mode is not None: - directory_args['mode'] = directory_mode - else: - directory_args['mode'] = None -
adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed) - - if os.path.exists(dest): - if not force: - module.exit_json(msg="file already exists", src=src, dest=dest, changed=False) - if (os.path.isdir(dest)): - basename = os.path.basename(src) - if original_basename: - basename = original_basename - dest = os.path.join(dest, basename) - if os.access(dest, os.R_OK): - md5sum_dest = module.md5(dest) - else: - if not os.path.exists(os.path.dirname(dest)): - try: - # os.path.exists() can return false in some - # circumstances where the directory does not have - # the execute bit for the current user set, in - # which case the stat() call will raise an OSError - os.stat(os.path.dirname(dest)) - except OSError, e: - if "permission denied" in str(e).lower(): - module.fail_json(msg="Destination directory %s is not accessible" % (os.path.dirname(dest))) - module.fail_json(msg="Destination directory %s does not exist" % (os.path.dirname(dest))) - if not os.access(os.path.dirname(dest), os.W_OK): - module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest))) - - backup_file = None - if md5sum_src != md5sum_dest or os.path.islink(dest): - try: - if backup: - if os.path.exists(dest): - backup_file = module.backup_local(dest) - # allow for conversion from symlink. - if os.path.islink(dest): - os.unlink(dest) - open(dest, 'w').close() - if validate: - if "%s" not in validate: - module.fail_json(msg="validate must contain %%s: %s" % (validate)) - (rc,out,err) = module.run_command(validate % src) - if rc != 0: - module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc,err)) - module.atomic_move(src, dest) - except IOError: - module.fail_json(msg="failed to copy: %s to %s" % (src, dest)) - changed = True - else: - changed = False - - res_args = dict( - dest = dest, src = src, md5sum = md5sum_src, changed = changed - ) - if backup_file: - res_args['backup_file'] = backup_file - - module.params['dest'] = dest - file_args = module.load_file_common_arguments(module.params) - res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed']) - - module.exit_json(**res_args) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/files/fetch b/library/files/fetch deleted file mode 100644 index 5b47d87a856..00000000000 --- a/library/files/fetch +++ /dev/null @@ -1,67 +0,0 @@ -# this is a virtual module that is entirely implemented server side - -DOCUMENTATION = ''' ---- -module: fetch -short_description: Fetches a file from remote nodes -description: - - This module works like M(copy), but in reverse. It is used for fetching - files from remote machines and storing them locally in a file tree, - organized by hostname. Note that this module is written to transfer - log files that might not be present, so a missing remote file won't - be an error unless fail_on_missing is set to 'yes'. -version_added: "0.2" -options: - src: - description: - - The file on the remote system to fetch. This I(must) be a file, not a - directory. Recursive fetching may be supported in a later release. - required: true - default: null - aliases: [] - dest: - description: - - A directory to save the file into. 
For example, if the I(dest) - directory is C(/backup), a I(src) file named C(/etc/profile) on host - C(host.example.com) would be saved into - C(/backup/host.example.com/etc/profile) - required: true - default: null - fail_on_missing: - version_added: "1.1" - description: - - Makes it fail when the source file is missing. - required: false - choices: [ "yes", "no" ] - default: "no" - validate_md5: - version_added: "1.4" - description: - - Verify that the source and destination md5sums match after the files are fetched. - required: false - choices: [ "yes", "no" ] - default: "yes" - flat: - version_added: "1.2" - description: - Allows you to override the default behavior of prepending hostname/path/to/file to - the destination. If dest ends with '/', it will use the basename of the source - file, similar to the copy module. Obviously this is only handy if the filenames - are unique. -requirements: [] -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Store file into /tmp/fetched/host.example.com/tmp/somefile -- fetch: src=/tmp/somefile dest=/tmp/fetched - -# Specifying a path directly -- fetch: src=/tmp/somefile dest=/tmp/prefix-{{ ansible_hostname }} flat=yes - -# Specifying a destination path -- fetch: src=/tmp/uniquefile dest=/tmp/special/ flat=yes - -# Storing in a path relative to the playbook -- fetch: src=/tmp/uniquefile dest=special/prefix-{{ ansible_hostname }} flat=yes -''' diff --git a/library/files/file b/library/files/file deleted file mode 100644 index 82f4d5016d5..00000000000 --- a/library/files/file +++ /dev/null @@ -1,344 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import shutil -import stat -import grp -import pwd -try: - import selinux - HAVE_SELINUX=True -except ImportError: - HAVE_SELINUX=False - -DOCUMENTATION = ''' ---- -module: file -version_added: "historical" -short_description: Sets attributes of files -extends_documentation_fragment: files -description: - - Sets attributes of files, symlinks, and directories, or removes - files/symlinks/directories. Many other modules support the same options as - the M(file) module - including M(copy), M(template), and M(assemble). -notes: - - See also M(copy), M(template), M(assemble) -requirements: [ ] -author: Michael DeHaan -options: - path: - description: - - 'path to the file being managed. Aliases: I(dest), I(name)' - required: true - default: [] - aliases: ['dest', 'name'] - state: - description: - - If C(directory), all immediate subdirectories will be created if they - do not exist; since 1.7 they will be created with the supplied permissions. - If C(file), the file will NOT be created if it does not exist; see the M(copy) - or M(template) module if you want that behavior. If C(link), the symbolic - link will be created or changed. Use C(hard) for hardlinks. If C(absent), - directories will be recursively deleted, and files or symlinks will be unlinked.
- If C(touch) (new in 1.4), an empty file will be created if the C(path) does not - exist, while an existing file or directory will receive updated file access and - modification times (similar to the way `touch` works from the command line). - required: false - default: file - choices: [ file, link, directory, hard, touch, absent ] - src: - required: false - default: null - choices: [] - description: - - path of the file to link to (applies only to C(state=link)). Will accept absolute, - relative and nonexistent paths. Relative paths are not expanded. - recurse: - required: false - default: "no" - choices: [ "yes", "no" ] - version_added: "1.1" - description: - - recursively set the specified file attributes (applies only to state=directory) - force: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - 'force the creation of the symlinks in two cases: the source file does - not exist (but will appear later); the destination exists and is a file (so, we need to unlink the - "path" file and create symlink to the "src" file in place of it).' -''' - -EXAMPLES = ''' -- file: path=/etc/foo.conf owner=foo group=foo mode=0644 -- file: src=/file/to/link/to dest=/path/to/symlink owner=foo group=foo state=link -- file: src=/tmp/{{ item.path }} dest={{ item.dest }} state=link - with_items: - - { path: 'x', dest: 'y' } - - { path: 'z', dest: 'k' } - -# touch a file, using symbolic modes to set the permissions (equivalent to 0644) -- file: path=/etc/foo.conf state=touch mode="u=rw,g=r,o=r" - -# touch the same file, but add/remove some permissions -- file: path=/etc/foo.conf state=touch mode="u+rw,g-wx,o-rwx" - -''' - -def main(): - - module = AnsibleModule( - argument_spec = dict( - state = dict(choices=['file','directory','link','hard','touch','absent'], default=None), - path = dict(aliases=['dest', 'name'], required=True), - original_basename = dict(required=False), # Internal use only, for recursive ops - recurse = dict(default='no', type='bool'), - force = dict(required=False,default=False,type='bool'), - diff_peek = dict(default=None), - validate = dict(required=False, default=None), - src = dict(required=False, default=None), - ), - add_file_common_args=True, - supports_check_mode=True - ) - - params = module.params - state = params['state'] - force = params['force'] - diff_peek = params['diff_peek'] - src = params['src'] - - # modify source as we later reload and pass, especially relevant when used by other modules. - params['path'] = path = os.path.expanduser(params['path']) - - # short-circuit for diff_peek - if diff_peek is not None: - appears_binary = False - try: - f = open(path) - b = f.read(8192) - f.close() - if "\x00" in b: - appears_binary = True - except: - pass - module.exit_json(path=path, changed=False, appears_binary=appears_binary) - - # Find out current state - prev_state = 'absent' - if os.path.lexists(path): - if os.path.islink(path): - prev_state = 'link' - elif os.path.isdir(path): - prev_state = 'directory' - elif os.stat(path).st_nlink > 1: - prev_state = 'hard' - else: - # could be many other things, but defaulting to file - prev_state = 'file' - - # state should default to file, but since that creates many conflicts, - # default to 'current' when it exists.
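- # (Illustrative, not in the original: for an existing symlink, the - # prev_state computed above is 'link', so an omitted state is - # treated as 'link' rather than as 'file'.)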
- if state is None: - if prev_state != 'absent': - state = prev_state - else: - state = 'file' - - # src serves double duty: it is the target of a symlink, and also an informational - # pass-through of the src used by the template or copy module; even when this module - # does not use it itself, it is needed to key off some behaviors - if src is not None: - src = os.path.expanduser(src) - - # original_basename is used by other modules that depend on file. - if os.path.isdir(path) and state not in ["link", "absent"]: - if params['original_basename']: - basename = params['original_basename'] - else: - basename = os.path.basename(src) - params['path'] = path = os.path.join(path, basename) - else: - if state in ['link','hard']: - module.fail_json(msg='src and dest are required for creating links') - - file_args = module.load_file_common_arguments(params) - changed = False - - recurse = params['recurse'] - if recurse and state != 'directory': - module.fail_json(path=path, msg="recurse option requires state to be 'directory'") - - if state == 'absent': - if state != prev_state: - if not module.check_mode: - if prev_state == 'directory': - try: - shutil.rmtree(path, ignore_errors=False) - except Exception, e: - module.fail_json(msg="rmtree failed: %s" % str(e)) - else: - try: - os.unlink(path) - except Exception, e: - module.fail_json(path=path, msg="unlinking failed: %s " % str(e)) - module.exit_json(path=path, changed=True) - else: - module.exit_json(path=path, changed=False) - - elif state == 'file': - if state != prev_state: - # file is not absent and any other state is a conflict - module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state)) - - changed = module.set_fs_attributes_if_different(file_args, changed) - module.exit_json(path=path, changed=changed) - - elif state == 'directory': - if prev_state == 'absent': - if module.check_mode: - module.exit_json(changed=True) - changed = True - curpath = '' - # Split the path so we can apply filesystem attributes recursively - # from the root (/) directory for absolute paths or the base path - # of a relative path. We can then walk the appropriate directory - # path to apply attributes.
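- # (Worked example, not in the original: for path '/opt/a/b' the - # loop below visits '/opt', then '/opt/a', then '/opt/a/b', - # creating each missing level and applying the requested - # attributes to it.)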
- for dirname in path.strip('/').split('/'): - curpath = '/'.join([curpath, dirname]) - # Remove leading slash if we're creating a relative path - if not os.path.isabs(path): - curpath = curpath.lstrip('/') - if not os.path.exists(curpath): - os.mkdir(curpath) - tmp_file_args = file_args.copy() - tmp_file_args['path']=curpath - changed = module.set_fs_attributes_if_different(tmp_file_args, changed) - - changed = module.set_fs_attributes_if_different(file_args, changed) - - if recurse: - for root,dirs,files in os.walk( file_args['path'] ): - for fsobj in dirs + files: - fsname=os.path.join(root, fsobj) - tmp_file_args = file_args.copy() - tmp_file_args['path']=fsname - changed = module.set_fs_attributes_if_different(tmp_file_args, changed) - - module.exit_json(path=path, changed=changed) - - elif state in ['link','hard']: - - if os.path.isdir(path) and not os.path.islink(path): - relpath = path - else: - relpath = os.path.dirname(path) - - absrc = os.path.normpath('%s/%s' % (relpath, os.path.basename(src))) - if not os.path.exists(src) and not os.path.exists(absrc) and not force: - module.fail_json(path=path, src=src, msg='src file does not exist, use "force=yes" if you really want to create the link: %s' % absrc) - - if state == 'hard': - if not os.path.isabs(src): - module.fail_json(msg="absolute paths are required") - elif prev_state == 'directory': - if not force: - module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, path)) - elif len(os.listdir(path)) > 0: - # refuse to replace a directory that has files in it - module.fail_json(path=path, msg='the directory %s is not empty, refusing to convert it' % path) - elif prev_state in ['file', 'hard'] and not force: - module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, path)) - - if prev_state == 'absent': - changed = True - elif prev_state == 'link': - old_src = os.readlink(path) - if old_src != src: - changed = True - elif prev_state == 'hard': - if not (state == 'hard' and os.stat(path).st_ino == os.stat(src).st_ino): - changed = True - if not force: - module.fail_json(dest=path, src=src, msg='Cannot link, different hard link exists at destination') - elif prev_state in ['file', 'directory']: - changed = True - if not force: - module.fail_json(dest=path, src=src, msg='Cannot link, %s exists at destination' % prev_state) - else: - module.fail_json(dest=path, src=src, msg='unexpected position reached') - - if changed and not module.check_mode: - if prev_state != 'absent': - # try to replace atomically - tmppath = '/'.join([os.path.dirname(path), ".%s.%s.tmp" % (os.getpid(),time.time())]) - try: - if prev_state == 'directory' and (state == 'hard' or state == 'link'): - os.rmdir(path) - if state == 'hard': - os.link(src,tmppath) - else: - os.symlink(src, tmppath) - os.rename(tmppath, path) - except OSError, e: - if os.path.exists(tmppath): - os.unlink(tmppath) - module.fail_json(path=path, msg='Error while replacing: %s' % str(e)) - else: - try: - if state == 'hard': - os.link(src,path) - else: - os.symlink(src, path) - except OSError, e: - module.fail_json(path=path, msg='Error while linking: %s' % str(e)) - - if module.check_mode and not os.path.exists(path): - module.exit_json(dest=path, src=src, changed=changed) - - changed = module.set_fs_attributes_if_different(file_args, changed) - module.exit_json(dest=path, src=src, changed=changed) - - elif state == 'touch': - if not module.check_mode: - - if prev_state == 'absent': - try: - open(path, 
'w').close() - except OSError, e: - module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e)) - elif prev_state in ['file', 'directory']: - try: - os.utime(path, None) - except OSError, e: - module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e)) - else: - module.fail_json(msg='Cannot touch other than files and directories') - - module.set_fs_attributes_if_different(file_args, True) - - module.exit_json(dest=path, changed=True) - - module.fail_json(path=path, msg='unexpected position reached') - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/files/ini_file b/library/files/ini_file deleted file mode 100644 index 83a980f5ba8..00000000000 --- a/library/files/ini_file +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Jan-Piet Mens -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: ini_file -short_description: Tweak settings in INI files -description: - - Manage (add, remove, change) individual settings in an INI-style file without having - to manage the file as a whole with, say, M(template) or M(assemble). Adds missing - sections if they don't exist. - - Comments are discarded when the source file is read, and therefore will not - show up in the destination file. -version_added: "0.9" -options: - dest: - description: - - Path to the INI-style file; this file is created if required - required: true - default: null - section: - description: - - Section name in INI file. This is added if C(state=present) automatically when - a single value is being set. - required: true - default: null - option: - description: - - if set (required for changing a I(value)), this is the name of the option. - - May be omitted if adding/removing a whole I(section). - required: false - default: null - value: - description: - - the string value to be associated with an I(option). May be omitted when removing an I(option). - required: false - default: null - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - required: false - default: "no" - choices: [ "yes", "no" ] - others: - description: - - all arguments accepted by the M(file) module also work here - required: false -notes: - - While it is possible to add an I(option) without specifying a I(value), this makes - no sense. - - A section named C(default) cannot be added by the module, but if it exists, individual - options within the section can be updated. (This is a limitation of Python's I(ConfigParser).) - Either use M(template) to create a base INI file with a C([default]) section, or use - M(lineinfile) to add the missing line. 
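- - (Illustrative note, not part of the original docs: with C(state=absent) and - only a I(section) supplied, the whole section, including all of its options, - is removed.)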
-requirements: [ ConfigParser ] -author: Jan-Piet Mens -''' - -EXAMPLES = ''' -# Ensure "fav=lemonade" is in section "[drinks]" in the specified file -- ini_file: dest=/etc/conf section=drinks option=fav value=lemonade mode=0600 backup=yes - -- ini_file: dest=/etc/anotherconf - section=drinks - option=temperature - value=cold - backup=yes -''' - -import ConfigParser -import sys - -# ============================================================== -# do_ini - -def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False): - - changed = False - if (sys.version_info[0] == 2 and sys.version_info[1] >= 7) or sys.version_info[0] >= 3: - cp = ConfigParser.ConfigParser(allow_no_value=True) - else: - cp = ConfigParser.ConfigParser() - cp.optionxform = identity - - try: - f = open(filename) - cp.readfp(f) - except IOError: - pass - - - if state == 'absent': - if option is None and value is None: - if cp.has_section(section): - cp.remove_section(section) - changed = True - else: - if option is not None: - try: - if cp.get(section, option): - cp.remove_option(section, option) - changed = True - except: - pass - - if state == 'present': - - # DEFAULT section is always there by DEFAULT, so never try to add it. - if cp.has_section(section) == False and section.upper() != 'DEFAULT': - - cp.add_section(section) - changed = True - - if option is not None and value is not None: - try: - oldvalue = cp.get(section, option) - if str(value) != str(oldvalue): - cp.set(section, option, value) - changed = True - except ConfigParser.NoSectionError: - cp.set(section, option, value) - changed = True - except ConfigParser.NoOptionError: - cp.set(section, option, value) - changed = True - - if changed: - if backup: - module.backup_local(filename) - - try: - f = open(filename, 'w') - cp.write(f) - except: - module.fail_json(msg="Can't create %s" % filename) - - return changed - -# ============================================================== -# identity - -def identity(arg): - """ - This function simply returns its argument. It serves as a - replacement for ConfigParser.optionxform, which by default - changes arguments to lower case. The identity function is a - better choice than str() or unicode(), because it is - encoding-agnostic.
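- For example (illustrative, not in the original): with the default - optionxform, an option written as 'FooBar' would be read back as - 'foobar'; with identity it is preserved as 'FooBar'.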
- """ - return arg - -# ============================================================== -# main - -def main(): - - module = AnsibleModule( - argument_spec = dict( - dest = dict(required=True), - section = dict(required=True), - option = dict(required=False), - value = dict(required=False), - backup = dict(default='no', type='bool'), - state = dict(default='present', choices=['present', 'absent']) - ), - add_file_common_args = True - ) - - info = dict() - - dest = os.path.expanduser(module.params['dest']) - section = module.params['section'] - option = module.params['option'] - value = module.params['value'] - state = module.params['state'] - backup = module.params['backup'] - - changed = do_ini(module, dest, section, option, value, state, backup) - - file_args = module.load_file_common_arguments(module.params) - changed = module.set_fs_attributes_if_different(file_args, changed) - - # Mission complete - module.exit_json(dest=dest, changed=changed, msg="OK") - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/files/lineinfile b/library/files/lineinfile deleted file mode 100644 index ba842e15e2a..00000000000 --- a/library/files/lineinfile +++ /dev/null @@ -1,395 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Daniel Hokka Zakrisson -# (c) 2014, Ahti Kitsik -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import pipes -import re -import os -import tempfile - -DOCUMENTATION = """ ---- -module: lineinfile -author: Daniel Hokka Zakrisson, Ahti Kitsik -short_description: Ensure a particular line is in a file, or replace an - existing line using a back-referenced regular expression. -description: - - This module will search a file for a line, and ensure that it is present or absent. - - This is primarily useful when you want to change a single line in a - file only. For other cases, see the M(copy) or M(template) modules. -version_added: "0.7" -options: - dest: - required: true - aliases: [ name, destfile ] - description: - - The file to modify. - regexp: - required: false - version_added: 1.7 - description: - - The regular expression to look for in every line of the file. For - C(state=present), the pattern to replace if found; only the last line - found will be replaced. For C(state=absent), the pattern of the line - to remove. Uses Python regular expressions; see - U(http://docs.python.org/2/library/re.html). - state: - required: false - choices: [ present, absent ] - default: "present" - aliases: [] - description: - - Whether the line should be there or not. - line: - required: false - description: - - Required for C(state=present). The line to insert/replace into the - file. If C(backrefs) is set, may contain backreferences that will get - expanded with the C(regexp) capture groups if the regexp matches. The - backreferences should be double escaped (see examples). 
- backrefs: - required: false - default: "no" - choices: [ "yes", "no" ] - version_added: "1.1" - description: - - Used with C(state=present). If set, line can contain backreferences - (both positional and named) that will get populated if the C(regexp) - matches. This flag changes the operation of the module slightly; - C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp) - doesn't match anywhere in the file, the file will be left unchanged. - If the C(regexp) does match, the last matching line will be replaced by - the expanded line parameter. - insertafter: - required: false - default: EOF - description: - - Used with C(state=present). If specified, the line will be inserted - after the specified regular expression. A special value is - available; C(EOF) for inserting the line at the end of the file. - May not be used with C(backrefs). - choices: [ 'EOF', '*regex*' ] - insertbefore: - required: false - version_added: "1.1" - description: - - Used with C(state=present). If specified, the line will be inserted - before the specified regular expression. A value is available; - C(BOF) for inserting the line at the beginning of the file. - May not be used with C(backrefs). - choices: [ 'BOF', '*regex*' ] - create: - required: false - choices: [ "yes", "no" ] - default: "no" - description: - - Used with C(state=present). If specified, the file will be created - if it does not already exist. By default it will fail if the file - is missing. - backup: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - Create a backup file including the timestamp information so you can - get the original file back if you somehow clobbered it incorrectly. - validate: - required: false - description: - - validation to run before copying into place. - Use %s in the command to indicate the current file to validate. - The command is passed securely so shell features like - expansion and pipes won't work. - required: false - default: None - version_added: "1.4" - others: - description: - - All arguments accepted by the M(file) module also work here. - required: false -""" - -EXAMPLES = r""" -- lineinfile: dest=/etc/selinux/config regexp=^SELINUX= line=SELINUX=disabled - -- lineinfile: dest=/etc/sudoers state=absent regexp="^%wheel" - -- lineinfile: dest=/etc/hosts regexp='^127\.0\.0\.1' line='127.0.0.1 localhost' owner=root group=root mode=0644 - -- lineinfile: dest=/etc/httpd/conf/httpd.conf regexp="^Listen " insertafter="^#Listen " line="Listen 8080" - -- lineinfile: dest=/etc/services regexp="^# port for http" insertbefore="^www.*80/tcp" line="# port for http by default" - -# Add a line to a file if it does not exist, without passing regexp -- lineinfile: dest=/tmp/testfile line="192.168.1.99 foo.lab.net foo" - -# Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs. 
-- lineinfile: "dest=/etc/sudoers state=present regexp='^%wheel' line='%wheel ALL=(ALL) NOPASSWD: ALL'" - -- lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\1Xms${xms}m\3' backrefs=yes - -# Validate a the sudoers file before saving -- lineinfile: dest=/etc/sudoers state=present regexp='^%ADMIN ALL\=' line='%ADMIN ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s' -""" - -def write_changes(module,lines,dest): - - tmpfd, tmpfile = tempfile.mkstemp() - f = os.fdopen(tmpfd,'wb') - f.writelines(lines) - f.close() - - validate = module.params.get('validate', None) - valid = not validate - if validate: - if "%s" not in validate: - module.fail_json(msg="validate must contain %%s: %s" % (validate)) - (rc, out, err) = module.run_command(validate % tmpfile) - valid = rc == 0 - if rc != 0: - module.fail_json(msg='failed to validate: ' - 'rc:%s error:%s' % (rc,err)) - if valid: - module.atomic_move(tmpfile, os.path.realpath(dest)) - -def check_file_attrs(module, changed, message): - - file_args = module.load_file_common_arguments(module.params) - if module.set_fs_attributes_if_different(file_args, False): - - if changed: - message += " and " - changed = True - message += "ownership, perms or SE linux context changed" - - return message, changed - - -def present(module, dest, regexp, line, insertafter, insertbefore, create, - backup, backrefs): - - if not os.path.exists(dest): - if not create: - module.fail_json(rc=257, msg='Destination %s does not exist !' % dest) - destpath = os.path.dirname(dest) - if not os.path.exists(destpath): - os.makedirs(destpath) - lines = [] - else: - f = open(dest, 'rb') - lines = f.readlines() - f.close() - - msg = "" - - if regexp is not None: - mre = re.compile(regexp) - - if insertafter not in (None, 'BOF', 'EOF'): - insre = re.compile(insertafter) - elif insertbefore not in (None, 'BOF'): - insre = re.compile(insertbefore) - else: - insre = None - - # index[0] is the line num where regexp has been found - # index[1] is the line num where insertafter/inserbefore has been found - index = [-1, -1] - m = None - for lineno, cur_line in enumerate(lines): - if regexp is not None: - match_found = mre.search(cur_line) - else: - match_found = line == cur_line.rstrip('\r\n') - if match_found: - index[0] = lineno - m = match_found - elif insre is not None and insre.search(cur_line): - if insertafter: - # + 1 for the next line - index[1] = lineno + 1 - if insertbefore: - # + 1 for the previous line - index[1] = lineno - - msg = '' - changed = False - # Regexp matched a line in the file - if index[0] != -1: - if backrefs: - new_line = m.expand(line) - else: - # Don't do backref expansion if not asked. - new_line = line - - if lines[index[0]] != new_line + os.linesep: - lines[index[0]] = new_line + os.linesep - msg = 'line replaced' - changed = True - elif backrefs: - # Do absolutely nothing, since it's not safe generating the line - # without the regexp matching to populate the backrefs. 
- pass - # Add it to the beginning of the file - elif insertbefore == 'BOF' or insertafter == 'BOF': - lines.insert(0, line + os.linesep) - msg = 'line added' - changed = True - # Add it to the end of the file if requested or - # if insertafter=/insertbefore didn't match anything - # (so default behaviour is to add at the end) - elif insertafter == 'EOF': - - # If the file is not empty then ensure there's a newline before the added line - if len(lines)>0 and not (lines[-1].endswith('\n') or lines[-1].endswith('\r')): - lines.append(os.linesep) - - lines.append(line + os.linesep) - msg = 'line added' - changed = True - # Do nothing if insert* didn't match - elif index[1] == -1: - pass - # insert* matched, but not the regexp - else: - lines.insert(index[1], line + os.linesep) - msg = 'line added' - changed = True - - backupdest = "" - if changed and not module.check_mode: - if backup and os.path.exists(dest): - backupdest = module.backup_local(dest) - write_changes(module, lines, dest) - - msg, changed = check_file_attrs(module, changed, msg) - module.exit_json(changed=changed, msg=msg, backup=backupdest) - - -def absent(module, dest, regexp, line, backup): - - if not os.path.exists(dest): - module.exit_json(changed=False, msg="file not present") - - msg = "" - - f = open(dest, 'rb') - lines = f.readlines() - f.close() - if regexp is not None: - cre = re.compile(regexp) - found = [] - - def matcher(cur_line): - if regexp is not None: - match_found = cre.search(cur_line) - else: - match_found = line == cur_line.rstrip('\r\n') - if match_found: - found.append(cur_line) - return not match_found - - lines = filter(matcher, lines) - changed = len(found) > 0 - backupdest = "" - if changed and not module.check_mode: - if backup: - backupdest = module.backup_local(dest) - write_changes(module, lines, dest) - - if changed: - msg = "%s line(s) removed" % len(found) - - msg, changed = check_file_attrs(module, changed, msg) - module.exit_json(changed=changed, found=len(found), msg=msg, backup=backupdest) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - dest=dict(required=True, aliases=['name', 'destfile']), - state=dict(default='present', choices=['absent', 'present']), - regexp=dict(default=None), - line=dict(aliases=['value']), - insertafter=dict(default=None), - insertbefore=dict(default=None), - backrefs=dict(default=False, type='bool'), - create=dict(default=False, type='bool'), - backup=dict(default=False, type='bool'), - validate=dict(default=None, type='str'), - ), - mutually_exclusive=[['insertbefore', 'insertafter']], - add_file_common_args=True, - supports_check_mode=True - ) - - params = module.params - create = module.params['create'] - backup = module.params['backup'] - backrefs = module.params['backrefs'] - dest = os.path.expanduser(params['dest']) - - - if os.path.isdir(dest): - module.fail_json(rc=256, msg='Destination %s is a directory !' % dest) - - if params['state'] == 'present': - if backrefs and params['regexp'] is None: - module.fail_json(msg='regexp= is required with backrefs=true') - - if params.get('line', None) is None: - module.fail_json(msg='line= is required with state=present') - - # Deal with the insertafter default value manually, to avoid errors - # because of the mutually_exclusive mechanism. 
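- # (Not in the original: a spec default of insertafter='EOF' would - # make insertafter always "set", so it would always collide with - # insertbefore under mutually_exclusive; applying the default here, - # only when neither option was given, avoids that false conflict.)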
- ins_bef, ins_aft = params['insertbefore'], params['insertafter'] - if ins_bef is None and ins_aft is None: - ins_aft = 'EOF' - - line = params['line'] - - # The safe_eval call will remove some quoting, but not others, - # so we need to know if we should specifically unquote it. - should_unquote = not is_quoted(line) - - # Replace escape sequences like '\n' while being sure - # not to replace octal escape sequences (\ooo) since they - # match the backref syntax. - if backrefs: - line = re.sub(r'(\\[0-9]{1,3})', r'\\\1', line) - line = module.safe_eval(pipes.quote(line)) - - # Now remove quotes around the string, if needed - if should_unquote: - line = unquote(line) - - present(module, dest, params['regexp'], line, - ins_aft, ins_bef, create, backup, backrefs) - else: - if params['regexp'] is None and params.get('line', None) is None: - module.fail_json(msg='one of line= or regexp= is required with state=absent') - - absent(module, dest, params['regexp'], params.get('line', None), backup) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.splitter import * - -main() diff --git a/library/files/replace b/library/files/replace deleted file mode 100644 index 57b522dd773..00000000000 --- a/library/files/replace +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Evan Kaufman . - -import re -import os -import tempfile - -DOCUMENTATION = """ ---- -module: replace -author: Evan Kaufman -short_description: Replace all instances of a particular string in a - file using a back-referenced regular expression. -description: - - This module will replace all instances of a pattern within a file. - - It is up to the user to maintain idempotence by ensuring that the - same pattern would never match any replacements made. -version_added: "1.6" -options: - dest: - required: true - aliases: [ name, destfile ] - description: - - The file to modify. - regexp: - required: true - description: - - The regular expression to look for in the contents of the file. - Uses Python regular expressions; see - U(http://docs.python.org/2/library/re.html). - Uses multiline mode, which means C(^) and C($) match the beginning - and end respectively of I(each line) of the file. - replace: - required: false - description: - - The string to replace regexp matches. May contain backreferences - that will get expanded with the regexp capture groups if the regexp - matches. If not set, matches are removed entirely. - backup: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - Create a backup file including the timestamp information so you can - get the original file back if you somehow clobbered it incorrectly. - validate: - required: false - description: - - validation to run before copying into place - required: false - default: None - others: - description: - - All arguments accepted by the M(file) module also work here. 
- required: false -""" - -EXAMPLES = r""" -- replace: dest=/etc/hosts regexp='(\s+)old\.host\.name(\s+.*)?$' replace='\1new.host.name\2' backup=yes - -- replace: dest=/home/jdoe/.ssh/known_hosts regexp='^old\.host\.name[^\n]*\n' owner=jdoe group=jdoe mode=644 - -- replace: dest=/etc/apache/ports regexp='^(NameVirtualHost|Listen)\s+80\s*$' replace='\1 127.0.0.1:8080' validate='/usr/sbin/apache2ctl -f %s -t' -""" - -def write_changes(module,contents,dest): - - tmpfd, tmpfile = tempfile.mkstemp() - f = os.fdopen(tmpfd,'wb') - f.write(contents) - f.close() - - validate = module.params.get('validate', None) - valid = not validate - if validate: - if "%s" not in validate: - module.fail_json(msg="validate must contain %%s: %s" % (validate)) - (rc, out, err) = module.run_command(validate % tmpfile) - valid = rc == 0 - if rc != 0: - module.fail_json(msg='failed to validate: ' - 'rc:%s error:%s' % (rc,err)) - if valid: - module.atomic_move(tmpfile, dest) - -def check_file_attrs(module, changed, message): - - file_args = module.load_file_common_arguments(module.params) - if module.set_file_attributes_if_different(file_args, False): - - if changed: - message += " and " - changed = True - message += "ownership, perms or SE linux context changed" - - return message, changed - -def main(): - module = AnsibleModule( - argument_spec=dict( - dest=dict(required=True, aliases=['name', 'destfile']), - regexp=dict(required=True), - replace=dict(default='', type='str'), - backup=dict(default=False, type='bool'), - validate=dict(default=None, type='str'), - ), - add_file_common_args=True, - supports_check_mode=True - ) - - params = module.params - dest = os.path.expanduser(params['dest']) - - if os.path.isdir(dest): - module.fail_json(rc=256, msg='Destination %s is a directory !' % dest) - - if not os.path.exists(dest): - module.fail_json(rc=257, msg='Destination %s does not exist !' % dest) - else: - f = open(dest, 'rb') - contents = f.read() - f.close() - - mre = re.compile(params['regexp'], re.MULTILINE) - result = re.subn(mre, params['replace'], contents, 0) - - if result[1] > 0 and contents != result[0]: - msg = '%s replacements made' % result[1] - changed = True - else: - msg = '' - changed = False - - if changed and not module.check_mode: - if params['backup'] and os.path.exists(dest): - module.backup_local(dest) - write_changes(module, result[0], dest) - - msg, changed = check_file_attrs(module, changed, msg) - module.exit_json(changed=changed, msg=msg) - -# this is magic, see lib/ansible/module_common.py -#<> - -main() diff --git a/library/files/synchronize b/library/files/synchronize deleted file mode 100644 index 842dd863849..00000000000 --- a/library/files/synchronize +++ /dev/null @@ -1,345 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012-2013, Timothy Appnel -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: synchronize -version_added: "1.4" -short_description: Uses rsync to make synchronizing file paths in your playbooks quick and easy. -description: - - This is a wrapper around rsync. Of course you could just use the command action to call rsync yourself, but you also have to add a fair number of boilerplate options and host facts. You still may need to call rsync directly via C(command) or C(shell) depending on your use case. The synchronize action is meant to do common things with C(rsync) easily. It does not provide access to the full power of rsync, but does make most invocations easier to follow. -options: - src: - description: - - Path on the source machine that will be synchronized to the destination; the path can be absolute or relative. - required: true - dest: - description: - - Path on the destination machine that will be synchronized from the source; the path can be absolute or relative. - required: true - dest_port: - description: - - Port number for ssh on the destination host. The ansible_ssh_port inventory var takes precedence over this value. - default: 22 - version_added: "1.5" - mode: - description: - - Specify the direction of the synchronization. In push mode the localhost or delegate is the source; in pull mode the remote host in context is the source. - required: false - choices: [ 'push', 'pull' ] - default: 'push' - archive: - description: - - Mirrors the rsync archive flag, enables recursive, links, perms, times, owner, group flags and -D. - choices: [ 'yes', 'no' ] - default: 'yes' - required: false - checksum: - description: - - Skip based on checksum, rather than mod-time & size; note that the "archive" option is still enabled by default - the "checksum" option will not disable it. - choices: [ 'yes', 'no' ] - default: 'no' - required: false - version_added: "1.6" - compress: - description: - - Compress file data during the transfer. In most cases, leave this enabled unless it causes problems. - choices: [ 'yes', 'no' ] - default: 'yes' - required: false - version_added: "1.7" - existing_only: - description: - - Skip creating new files on receiver. - choices: [ 'yes', 'no' ] - default: 'no' - required: false - version_added: "1.5" - delete: - description: - - Delete files in I(dest) that don't exist in I(src) (after transfer, not before). This option requires C(recursive=yes). - choices: [ 'yes', 'no' ] - default: 'no' - required: false - dirs: - description: - - Transfer directories without recursing. - choices: [ 'yes', 'no' ] - default: 'no' - required: false - recursive: - description: - - Recurse into directories. - choices: [ 'yes', 'no' ] - default: the value of the archive option - required: false - links: - description: - - Copy symlinks as symlinks. - choices: [ 'yes', 'no' ] - default: the value of the archive option - required: false - copy_links: - description: - - When copying symlinks, copy the item they point to (the referent) rather than the symlink itself. - choices: [ 'yes', 'no' ] - default: 'no' - required: false - perms: - description: - - Preserve permissions.
- choices: [ 'yes', 'no' ] - default: the value of the archive option - required: false - times: - description: - - Preserve modification times - choices: [ 'yes', 'no' ] - default: the value of the archive option - required: false - owner: - description: - - Preserve owner (super user only) - choices: [ 'yes', 'no' ] - default: the value of the archive option - required: false - group: - description: - - Preserve group - choices: [ 'yes', 'no' ] - default: the value of the archive option - required: false - rsync_path: - description: - - Specify the rsync command to run on the remote machine. See C(--rsync-path) on the rsync man page. - required: false - rsync_timeout: - description: - - Specify a --timeout for the rsync command in seconds. - default: 0 - required: false - set_remote_user: - description: - - put user@ for the remote paths. If you have a custom ssh config to define the remote user for a host - that does not match the inventory user, you should set this parameter to "no". - default: yes - rsync_opts: - description: - - Specify additional rsync options by passing in an array. - default: - required: false - version_added: "1.6" -notes: - - Inspect the verbose output to validate the destination user/host/path - are what was expected. - - The remote user for the dest path will always be the remote_user, not - the sudo_user. - - Expect that dest=~/x will be ~/x even if using sudo. - - To exclude files and directories from being synchronized, you may add - C(.rsync-filter) files to the source directory. - - -author: Timothy Appnel -''' - -EXAMPLES = ''' -# Synchronization of src on the control machine to dest on the remote hosts -synchronize: src=some/relative/path dest=/some/absolute/path - -# Synchronization without any --archive options enabled -synchronize: src=some/relative/path dest=/some/absolute/path archive=no - -# Synchronization with --archive options enabled except for --recursive -synchronize: src=some/relative/path dest=/some/absolute/path recursive=no - -# Synchronization with --archive options enabled except for --times, with --checksum option enabled -synchronize: src=some/relative/path dest=/some/absolute/path checksum=yes times=no - -# Synchronization without --archive options enabled except use --links -synchronize: src=some/relative/path dest=/some/absolute/path archive=no links=yes - -# Synchronization of two paths both on the control machine -local_action: synchronize src=some/relative/path dest=/some/absolute/path - -# Synchronization of src on the inventory host to the dest on the localhost in -pull mode -synchronize: mode=pull src=some/relative/path dest=/some/absolute/path - -# Synchronization of src on delegate host to dest on the current inventory host -synchronize: > - src=some/relative/path dest=/some/absolute/path - delegate_to: delegate.host - -# Synchronize and delete files in dest on the remote host that are not found in src of localhost. 
-synchronize: src=some/relative/path dest=/some/absolute/path delete=yes - -# Synchronize using an alternate rsync command -synchronize: src=some/relative/path dest=/some/absolute/path rsync_path="sudo rsync" - -# Example .rsync-filter file in the source directory -- var # exclude any path whose last part is 'var' -- /var # exclude any path starting with 'var' starting at the source directory -+ /var/conf # include /var/conf even though it was previously excluded - -# Synchronize passing in extra rsync options -synchronize: src=/tmp/helloworld dest=/var/www/helloworld rsync_opts=--no-motd,--exclude=.git -''' - - -def main(): - module = AnsibleModule( - argument_spec = dict( - src = dict(required=True), - dest = dict(required=True), - dest_port = dict(default=22), - delete = dict(default='no', type='bool'), - private_key = dict(default=None), - rsync_path = dict(default=None), - archive = dict(default='yes', type='bool'), - checksum = dict(default='no', type='bool'), - compress = dict(default='yes', type='bool'), - existing_only = dict(default='no', type='bool'), - dirs = dict(default='no', type='bool'), - recursive = dict(type='bool'), - links = dict(type='bool'), - copy_links = dict(type='bool'), - perms = dict(type='bool'), - times = dict(type='bool'), - owner = dict(type='bool'), - group = dict(type='bool'), - set_remote_user = dict(default='yes', type='bool'), - rsync_timeout = dict(type='int', default=0), - rsync_opts = dict(type='list') - ), - supports_check_mode = True - ) - - source = '"' + module.params['src'] + '"' - dest = '"' + module.params['dest'] + '"' - dest_port = module.params['dest_port'] - delete = module.params['delete'] - private_key = module.params['private_key'] - rsync_path = module.params['rsync_path'] - rsync = module.params.get('local_rsync_path', 'rsync') - rsync_timeout = module.params['rsync_timeout'] - archive = module.params['archive'] - checksum = module.params['checksum'] - compress = module.params['compress'] - existing_only = module.params['existing_only'] - dirs = module.params['dirs'] - # the default of these params depends on the value of archive - recursive = module.params['recursive'] - links = module.params['links'] - copy_links = module.params['copy_links'] - perms = module.params['perms'] - times = module.params['times'] - owner = module.params['owner'] - group = module.params['group'] - rsync_opts = module.params['rsync_opts'] - - cmd = '%s --delay-updates -FF' % rsync - if compress: - cmd = cmd + ' --compress' - if rsync_timeout: - cmd = cmd + ' --timeout=%s' % rsync_timeout - if module.check_mode: - cmd = cmd + ' --dry-run' - if delete: - cmd = cmd + ' --delete-after' - if existing_only: - cmd = cmd + ' --existing' - if checksum: - cmd = cmd + ' --checksum' - if archive: - cmd = cmd + ' --archive' - if recursive is False: - cmd = cmd + ' --no-recursive' - if links is False: - cmd = cmd + ' --no-links' - if copy_links is True: - cmd = cmd + ' --copy-links' - if perms is False: - cmd = cmd + ' --no-perms' - if times is False: - cmd = cmd + ' --no-times' - if owner is False: - cmd = cmd + ' --no-owner' - if group is False: - cmd = cmd + ' --no-group' - else: - if recursive is True: - cmd = cmd + ' --recursive' - if links is True: - cmd = cmd + ' --links' - if copy_links is True: - cmd = cmd + ' --copy-links' - if perms is True: - cmd = cmd + ' --perms' - if times is True: - cmd = cmd + ' --times' - if owner is True: - cmd = cmd + ' --owner' - if group is True: - cmd = cmd + ' --group' - if dirs: - cmd = cmd + ' --dirs' - if
private_key is None: - private_key = '' - else: - private_key = '-i '+ private_key - - ssh_opts = '-S none -o StrictHostKeyChecking=no' - if dest_port != 22: - cmd += " --rsh 'ssh %s %s -o Port=%s'" % (private_key, ssh_opts, dest_port) - else: - cmd += " --rsh 'ssh %s %s'" % (private_key, ssh_opts) # need ssh param - - if rsync_path: - cmd = cmd + " --rsync-path=%s" % (rsync_path) - - if rsync_opts: - cmd = cmd + " " + " ".join(rsync_opts) - - changed_marker = '<>' - cmd = cmd + " --out-format='" + changed_marker + "%i %n%L'" - - # expand the paths - if '@' not in source: - source = os.path.expanduser(source) - if '@' not in dest: - dest = os.path.expanduser(dest) - - cmd = ' '.join([cmd, source, dest]) - cmdstr = cmd - (rc, out, err) = module.run_command(cmd) - if rc: - return module.fail_json(msg=err, rc=rc, cmd=cmdstr) - else: - changed = changed_marker in out - out_clean=out.replace(changed_marker,'') - out_lines=out_clean.split('\n') - while '' in out_lines: - out_lines.remove('') - return module.exit_json(changed=changed, msg=out_clean, - rc=rc, cmd=cmdstr, stdout_lines=out_lines) - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/files/template b/library/files/template deleted file mode 100644 index 7ba072fcdc1..00000000000 --- a/library/files/template +++ /dev/null @@ -1,66 +0,0 @@ -# this is a virtual module that is entirely implemented server side - -DOCUMENTATION = ''' ---- -module: template -version_added: historical -short_description: Templates a file out to a remote server. -description: - - Templates are processed by the Jinja2 templating language - (U(http://jinja.pocoo.org/docs/)) - documentation on the template - formatting can be found in the Template Designer Documentation - (U(http://jinja.pocoo.org/docs/templates/)). - - "Six additional variables can be used in templates: C(ansible_managed) - (configurable via the C(defaults) section of C(ansible.cfg)) contains a string - which can be used to describe the template name, host, modification time of the - template file and the owner uid, C(template_host) contains the node name of - the template's machine, C(template_uid) the owner, C(template_path) the - absolute path of the template, C(template_fullpath) is the absolute path of the - template, and C(template_run_date) is the date that the template was rendered. Note that including - a string that uses a date in the template will result in the template being marked 'changed' - each time." -options: - src: - description: - - Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path. - required: true - default: null - aliases: [] - dest: - description: - - Location to render the template to on the remote machine. - required: true - default: null - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - required: false - choices: [ "yes", "no" ] - default: "no" - validate: - description: - - The validation command to run before copying into place. - - The path to the file to validate is passed in via '%s' which must be present as in the visudo example below. - - validation to run before copying into place. The command is passed - securely so shell features like expansion and pipes won't work. - required: false - default: "" - version_added: "1.2" -notes: - - "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)." 
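- - "(Illustrative, not part of the original docs: C(trim_blocks=True) removes - the first newline after each block tag, so a template body of - '{% if x %}\nhi\n{% endif %}\n' renders as 'hi\n' rather than '\nhi\n\n'.)"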
-requirements: [] -author: Michael DeHaan -extends_documentation_fragment: files -''' - -EXAMPLES = ''' -# Example from Ansible Playbooks -- template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode=0644 - -# The same example, but using symbolic modes equivalent to 0644 -- template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode="u=rw,g=r,o=r" - -# Copy a new "sudoers" file into place, after passing validation with visudo -- template: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s' -''' diff --git a/library/files/unarchive b/library/files/unarchive deleted file mode 100644 index 657e464937b..00000000000 --- a/library/files/unarchive +++ /dev/null @@ -1,250 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# (c) 2013, Dylan Martin -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: unarchive -version_added: 1.4 -short_description: Copies an archive to a remote location and unpack it -description: - - The M(unarchive) module copies an archive file from the local machine to a remote and unpacks it. -options: - src: - description: - - Local path to archive file to copy to the remote server; can be absolute or relative. - required: true - default: null - dest: - description: - - Remote absolute path where the archive should be unpacked - required: true - default: null - copy: - description: - - "if true, the file is copied from the 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine." - required: false - choices: [ "yes", "no" ] - default: "yes" - creates: - description: - - a filename, when it already exists, this step will B(not) be run. - required: no - default: null - version_added: "1.6" -author: Dylan Martin -todo: - - detect changed/unchanged for .zip files - - handle common unarchive args, like preserve owner/timestamp etc... -notes: - - requires C(tar)/C(unzip) command on target host - - can handle I(gzip), I(bzip2) and I(xz) compressed as well as uncompressed tar files - - detects type of archive automatically - - uses tar's C(--diff arg) to calculate if changed or not. If this C(arg) is not - supported, it will always unpack the archive - - does not detect if a .zip file is different from destination - always unzips - - existing files/directories in the destination which are not in the archive - are not touched. 
This is the same behavior as a normal archive extraction - - existing files/directories in the destination which are not in the archive - are ignored for purposes of deciding if the archive should be unpacked or not -''' - -EXAMPLES = ''' -# Example from Ansible Playbooks -- unarchive: src=foo.tgz dest=/var/lib/foo - -# Unarchive a file that is already on the remote machine -- unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no -''' - -import os - - -# class to handle .zip files -class ZipFile(object): - - def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('unzip') - - def is_unarchived(self): - return dict(unarchived=False) - - def unarchive(self): - cmd = '%s -o "%s" -d "%s"' % (self.cmd_path, self.src, self.dest) - rc, out, err = self.module.run_command(cmd) - return dict(cmd=cmd, rc=rc, out=out, err=err) - - def can_handle_archive(self): - if not self.cmd_path: - return False - cmd = '%s -l "%s"' % (self.cmd_path, self.src) - rc, out, err = self.module.run_command(cmd) - if rc == 0: - return True - return False - - -# class to handle gzipped tar files -class TgzFile(object): - - def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') - self.zipflag = 'z' - - def is_unarchived(self): - cmd = '%s -v -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src) - rc, out, err = self.module.run_command(cmd) - unarchived = (rc == 0) - return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd) - - def unarchive(self): - cmd = '%s -x%sf "%s"' % (self.cmd_path, self.zipflag, self.src) - rc, out, err = self.module.run_command(cmd, cwd=self.dest) - return dict(cmd=cmd, rc=rc, out=out, err=err) - - def can_handle_archive(self): - if not self.cmd_path: - return False - cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src) - rc, out, err = self.module.run_command(cmd) - if rc == 0: - if len(out.splitlines(True)) > 0: - return True - return False - - -# class to handle tar files that aren't compressed -class TarFile(TgzFile): - def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') - self.zipflag = '' - - -# class to handle bzip2 compressed tar files -class TarBzip(TgzFile): - def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') - self.zipflag = 'j' - - -# class to handle xz compressed tar files -class TarXz(TgzFile): - def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') - self.zipflag = 'J' - - -# try handlers in order and return the one that works or bail if none work -def pick_handler(src, dest, module): - handlers = [TgzFile, ZipFile, TarFile, TarBzip, TarXz] - for handler in handlers: - obj = handler(src, dest, module) - if obj.can_handle_archive(): - return obj - module.fail_json(msg='Failed to find handler to unarchive. 
Make sure the required command to extract the file is installed.') - - -def main(): - module = AnsibleModule( - # not checking because of daisy chain to file module - argument_spec = dict( - src = dict(required=True), - original_basename = dict(required=False), # used to handle 'dest is a directory' via template, a slight hack - dest = dict(required=True), - copy = dict(default=True, type='bool'), - creates = dict(required=False), - ), - add_file_common_args=True, - ) - - src = os.path.expanduser(module.params['src']) - dest = os.path.expanduser(module.params['dest']) - copy = module.params['copy'] - creates = module.params['creates'] - - # did tar file arrive? - if not os.path.exists(src): - if copy: - module.fail_json(msg="Source '%s' failed to transfer" % src) - else: - module.fail_json(msg="Source '%s' does not exist" % src) - if not os.access(src, os.R_OK): - module.fail_json(msg="Source '%s' not readable" % src) - - if creates: - # do not run the command if the line contains creates=filename - # and the filename already exists. This allows idempotence - # of command executions. - v = os.path.expanduser(creates) - if os.path.exists(v): - module.exit_json( - stdout="skipped, since %s exists" % v, - skipped=True, - changed=False, - stderr=False, - rc=0 - ) - - # is dest OK to receive tar file? - if not os.path.isdir(dest): - module.fail_json(msg="Destination '%s' is not a directory" % dest) - if not os.access(dest, os.W_OK): - module.fail_json(msg="Destination '%s' not writable" % dest) - - handler = pick_handler(src, dest, module) - - res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src) - - # do we need to do unpack? - res_args['check_results'] = handler.is_unarchived() - if res_args['check_results']['unarchived']: - res_args['changed'] = False - module.exit_json(**res_args) - - # do the unpack - try: - res_args['extract_results'] = handler.unarchive() - if res_args['extract_results']['rc'] != 0: - module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args) - except IOError: - module.fail_json(msg="failed to unpack %s to %s" % (src, dest)) - - res_args['changed'] = True - - module.exit_json(**res_args) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/files/xattr b/library/files/xattr deleted file mode 100644 index 94115ae3b51..00000000000 --- a/library/files/xattr +++ /dev/null @@ -1,206 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: xattr -version_added: "1.3" -short_description: set/retrieve extended attributes -description: - - Manages filesystem user defined extended attributes, requires that they are enabled - on the target filesystem and that the setfattr/getfattr utilities are present. 
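Under the hood this module just shells out to getfattr/setfattr and parses the key="value" lines they print. A minimal standalone sketch of the read path (assuming the getfattr utility from the attr package is on PATH; the function name is illustrative, not module code):

```python
import subprocess

def read_xattrs(path, follow=True):
    # getfattr -d dumps all user-namespace attributes as key="value" lines;
    # --absolute-names suppresses the "leading '/'" warning, and -h acts on
    # the symlink itself instead of its target (the module's follow=no case).
    cmd = ['getfattr', '--absolute-names', '-d', path]
    if not follow:
        cmd.insert(1, '-h')
    out = subprocess.check_output(cmd).decode()
    attrs = {}
    for line in out.splitlines():
        # skip the "# file: ..." header and blank lines, as _run_xattr() does
        if line.startswith('#') or line == '':
            continue
        if '=' in line:
            key, val = line.split('=', 1)
            attrs[key] = val.strip('"')
        else:
            attrs[line] = ''
    return attrs

print(read_xattrs('/etc/hosts'))
```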
-options: - name: - required: true - default: None - aliases: ['path'] - description: - - The full path of the file/object to get the facts of - key: - required: false - default: None - description: - - The name of a specific Extended attribute key to set/retrieve - value: - required: false - default: None - description: - - The value to set the named name/key to, it automatically sets the C(state) to 'set' - state: - required: false - default: get - choices: [ 'read', 'present', 'all', 'keys', 'absent' ] - description: - - defines which state you want to do. - C(read) retrieves the current value for a C(key) (default) - C(present) sets C(name) to C(value), default if value is set - C(all) dumps all data - C(keys) retrieves all keys - C(absent) deletes the key - follow: - required: false - default: yes - choices: [ 'yes', 'no' ] - description: - - if yes, dereferences symlinks and sets/gets attributes on symlink target, - otherwise acts on symlink itself. - -author: Brian Coca -''' - -EXAMPLES = ''' -# Obtain the extended attributes of /etc/foo.conf -- xattr: name=/etc/foo.conf - -# Sets the key 'foo' to value 'bar' -- xattr: path=/etc/foo.conf key=user.foo value=bar - -# Removes the key 'foo' -- xattr: name=/etc/foo.conf key=user.foo state=absent -''' - -import operator - -def get_xattr_keys(module,path,follow): - cmd = [ module.get_bin_path('getfattr', True) ] - # prevents warning and not sure why it's not default - cmd.append('--absolute-names') - if not follow: - cmd.append('-h') - cmd.append(path) - - return _run_xattr(module,cmd) - -def get_xattr(module,path,key,follow): - - cmd = [ module.get_bin_path('getfattr', True) ] - # prevents warning and not sure why it's not default - cmd.append('--absolute-names') - if not follow: - cmd.append('-h') - if key is None: - cmd.append('-d') - else: - cmd.append('-n %s' % key) - cmd.append(path) - - return _run_xattr(module,cmd,False) - -def set_xattr(module,path,key,value,follow): - - cmd = [ module.get_bin_path('setfattr', True) ] - if not follow: - cmd.append('-h') - cmd.append('-n %s' % key) - cmd.append('-v %s' % value) - cmd.append(path) - - return _run_xattr(module,cmd) - -def rm_xattr(module,path,key,follow): - - cmd = [ module.get_bin_path('setfattr', True) ] - if not follow: - cmd.append('-h') - cmd.append('-x %s' % key) - cmd.append(path) - - return _run_xattr(module,cmd,False) - -def _run_xattr(module,cmd,check_rc=True): - - try: - (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc) - except Exception, e: - module.fail_json(msg="%s!" 
% e.strerror) - - #result = {'raw': out} - result = {} - for line in out.splitlines(): - if re.match("^#", line) or line == "": - pass - elif re.search('=', line): - (key, val) = line.split("=") - result[key] = val.strip('"') - else: - result[line] = '' - return result - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True, aliases=['path']), - key = dict(required=False, default=None), - value = dict(required=False, default=None), - state = dict(required=False, default='read', choices=[ 'read', 'present', 'all', 'keys', 'absent' ], type='str'), - follow = dict(required=False, type='bool', default=True), - ), - supports_check_mode=True, - ) - path = module.params.get('name') - key = module.params.get('key') - value = module.params.get('value') - state = module.params.get('state') - follow = module.params.get('follow') - - if not os.path.exists(path): - module.fail_json(msg="path not found or not accessible!") - - - changed=False - msg = "" - res = {} - - if key is None and state in ['present','absent']: - module.fail_json(msg="%s needs a key parameter" % state) - - # All xattr must begin in user namespace - if key is not None and not re.match('^user\.',key): - key = 'user.%s' % key - - - if (state == 'present' or value is not None): - current=get_xattr(module,path,key,follow) - if current is None or not key in current or value != current[key]: - if not module.check_mode: - res = set_xattr(module,path,key,value,follow) - changed=True - res=current - msg="%s set to %s" % (key, value) - elif state == 'absent': - current=get_xattr(module,path,key,follow) - if current is not None and key in current: - if not module.check_mode: - res = rm_xattr(module,path,key,follow) - changed=True - res=current - msg="%s removed" % (key) - elif state == 'keys': - res=get_xattr_keys(module,path,follow) - msg="returning all keys" - elif state == 'all': - res=get_xattr(module,path,None,follow) - msg="dumping all" - else: - res=get_xattr(module,path,key,follow) - msg="returning %s" % key - - module.exit_json(changed=changed, msg=msg, xattr=res) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/internal/async_status b/library/internal/async_status deleted file mode 100644 index 1605f877a46..00000000000 --- a/library/internal/async_status +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: async_status -short_description: Obtain status of asynchronous task -description: - - "This module gets the status of an asynchronous task." 
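The status check is a plain file read: an async job records its state under ~/.ansible_async/&lt;jid&gt;, first as {"started": 1, ...} and later as the module's full JSON result. A condensed sketch of that logic (the function name is illustrative):

```python
import json
import os

def async_job_status(jid):
    # async_wrapper writes the job's state to ~/.ansible_async/<jid>
    log_path = os.path.join(os.path.expanduser("~/.ansible_async"), jid)
    if not os.path.exists(log_path):
        return dict(failed=True, msg="could not find job", ansible_job_id=jid)
    data = open(log_path).read()
    try:
        result = json.loads(data)
    except ValueError:
        if data == '':
            # file exists but nothing written yet: the job is still running
            return dict(results_file=log_path, ansible_job_id=jid, started=1)
        return dict(failed=True, msg="Could not parse job output: %s" % data)
    if 'started' not in result:
        # a full result was written, so the job has finished
        result['finished'] = 1
    result['ansible_job_id'] = jid
    return result
```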
-version_added: "0.5" -options: - jid: - description: - - Job or task identifier - required: true - default: null - aliases: [] - mode: - description: - - if C(status), obtain the status; if C(cleanup), clean up the async job cache - located in C(~/.ansible_async/) for the specified job I(jid). - required: false - choices: [ "status", "cleanup" ] - default: "status" -notes: - - See also U(http://docs.ansible.com/playbooks_async.html) -requirements: [] -author: Michael DeHaan -''' - -import datetime -import traceback - -def main(): - - module = AnsibleModule(argument_spec=dict( - jid=dict(required=True), - mode=dict(default='status', choices=['status','cleanup']), - )) - - mode = module.params['mode'] - jid = module.params['jid'] - - # setup logging directory - logdir = os.path.expanduser("~/.ansible_async") - log_path = os.path.join(logdir, jid) - - if not os.path.exists(log_path): - module.fail_json(msg="could not find job", ansible_job_id=jid) - - if mode == 'cleanup': - os.unlink(log_path) - module.exit_json(ansible_job_id=jid, erased=log_path) - - # NOT in cleanup mode, assume regular status mode - # no remote kill mode currently exists, but probably should - # consider log_path + ".pid" file and also unlink that above - - data = file(log_path).read() - try: - data = json.loads(data) - except Exception, e: - if data == '': - # file not written yet? That means it is running - module.exit_json(results_file=log_path, ansible_job_id=jid, started=1) - else: - module.fail_json(ansible_job_id=jid, results_file=log_path, - msg="Could not parse job output: %s" % data) - - if not 'started' in data: - data['finished'] = 1 - data['ansible_job_id'] = jid - - # Fix error: TypeError: exit_json() keywords must be strings - data = dict([(str(k), v) for k, v in data.iteritems()]) - - module.exit_json(**data) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/internal/async_wrapper b/library/internal/async_wrapper deleted file mode 100644 index 2bc2dc21823..00000000000 --- a/library/internal/async_wrapper +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
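The wrapper that follows daemonizes via the classic double fork and then supervises the real module, killing its whole process group once the time limit runs out. Condensed, the watcher loop amounts to this sketch (a simplification of the code below):

```python
import os
import signal
import time

def watch(sub_pid, time_limit):
    # Put the child in its own process group so killpg() also reaps any
    # grandchildren the wrapped module spawned.
    os.setpgid(sub_pid, sub_pid)
    remaining = int(time_limit)
    # waitpid with WNOHANG returns (0, 0) while the child is still running
    while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
        time.sleep(5)
        remaining -= 5
        if remaining <= 0:
            # time limit exhausted: kill the whole group
            os.killpg(sub_pid, signal.SIGKILL)
            break
```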
-# - -try: - import json -except ImportError: - import simplejson as json -import shlex -import os -import subprocess -import sys -import datetime -import traceback -import signal -import time -import syslog - -def daemonize_self(): - # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012 - # logger.info("cobblerd started") - try: - pid = os.fork() - if pid > 0: - # exit first parent - sys.exit(0) - except OSError, e: - print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror) - sys.exit(1) - - # decouple from parent environment - os.chdir("/") - os.setsid() - os.umask(022) - - # do second fork - try: - pid = os.fork() - if pid > 0: - # print "Daemon PID %d" % pid - sys.exit(0) - except OSError, e: - print >>sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror) - sys.exit(1) - - dev_null = file('/dev/null','rw') - os.dup2(dev_null.fileno(), sys.stdin.fileno()) - os.dup2(dev_null.fileno(), sys.stdout.fileno()) - os.dup2(dev_null.fileno(), sys.stderr.fileno()) - -if len(sys.argv) < 3: - print json.dumps({ - "failed" : True, - "msg" : "usage: async_wrapper . Humans, do not call directly!" - }) - sys.exit(1) - -jid = "%s.%d" % (sys.argv[1], os.getpid()) -time_limit = sys.argv[2] -wrapped_module = sys.argv[3] -argsfile = sys.argv[4] -cmd = "%s %s" % (wrapped_module, argsfile) - -syslog.openlog('ansible-%s' % os.path.basename(__file__)) -syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:])) - -# setup logging directory -logdir = os.path.expanduser("~/.ansible_async") -log_path = os.path.join(logdir, jid) - -if not os.path.exists(logdir): - try: - os.makedirs(logdir) - except: - print json.dumps({ - "failed" : 1, - "msg" : "could not create: %s" % logdir - }) - -def _run_command(wrapped_cmd, jid, log_path): - - logfile = open(log_path, "w") - logfile.write(json.dumps({ "started" : 1, "ansible_job_id" : jid })) - logfile.close() - logfile = open(log_path, "w") - result = {} - - outdata = '' - try: - cmd = shlex.split(wrapped_cmd) - script = subprocess.Popen(cmd, shell=False, - stdin=None, stdout=logfile, stderr=logfile) - script.communicate() - outdata = file(log_path).read() - result = json.loads(outdata) - - except (OSError, IOError), e: - result = { - "failed": 1, - "cmd" : wrapped_cmd, - "msg": str(e), - } - result['ansible_job_id'] = jid - logfile.write(json.dumps(result)) - except: - result = { - "failed" : 1, - "cmd" : wrapped_cmd, - "data" : outdata, # temporary debug only - "msg" : traceback.format_exc() - } - result['ansible_job_id'] = jid - logfile.write(json.dumps(result)) - logfile.close() - -# immediately exit this process, leaving an orphaned process -# running which immediately forks a supervisory timing process - -#import logging -#import logging.handlers - -#logger = logging.getLogger("ansible_async") -#logger.setLevel(logging.WARNING) -#logger.addHandler( logging.handlers.SysLogHandler("/dev/log") ) -def debug(msg): - #logger.warning(msg) - pass - -try: - pid = os.fork() - if pid: - # Notify the overlord that the async process started - - # we need to not return immmediately such that the launched command has an attempt - # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile) - # this probably could be done with some IPC later. 
Modules should always read - # the argsfile at the very first start of their execution anyway - time.sleep(1) - debug("Return async_wrapper task started.") - print json.dumps({ "started" : 1, "ansible_job_id" : jid, "results_file" : log_path }) - sys.stdout.flush() - sys.exit(0) - else: - # The actual wrapper process - - # Daemonize, so we keep on running - daemonize_self() - - # we are now daemonized, create a supervisory process - debug("Starting module and watcher") - - sub_pid = os.fork() - if sub_pid: - # the parent stops the process after the time limit - remaining = int(time_limit) - - # set the child process group id to kill all children - os.setpgid(sub_pid, sub_pid) - - debug("Start watching %s (%s)"%(sub_pid, remaining)) - time.sleep(5) - while os.waitpid(sub_pid, os.WNOHANG) == (0, 0): - debug("%s still running (%s)"%(sub_pid, remaining)) - time.sleep(5) - remaining = remaining - 5 - if remaining <= 0: - debug("Now killing %s"%(sub_pid)) - os.killpg(sub_pid, signal.SIGKILL) - debug("Sent kill to group %s"%sub_pid) - time.sleep(1) - sys.exit(0) - debug("Done in kid B.") - os._exit(0) - else: - # the child process runs the actual module - debug("Start module (%s)"%os.getpid()) - _run_command(cmd, jid, log_path) - debug("Module complete (%s)"%os.getpid()) - sys.exit(0) - -except Exception, err: - debug("error: %s"%(err)) - raise err diff --git a/library/inventory/add_host b/library/inventory/add_host deleted file mode 100644 index 4fd4e1eb15f..00000000000 --- a/library/inventory/add_host +++ /dev/null @@ -1,36 +0,0 @@ -# -*- mode: python -*- - -DOCUMENTATION = ''' ---- -module: add_host -short_description: add a host (and alternatively a group) to the ansible-playbook in-memory inventory -description: - - Use variables to create new hosts and groups in inventory for use in later plays of the same playbook. - Takes variables so you can define the new hosts more fully. -version_added: "0.9" -options: - name: - aliases: [ 'hostname', 'host' ] - description: - - The hostname/ip of the host to add to the inventory, can include a colon and a port number. - required: true - groups: - aliases: [ 'groupname', 'group' ] - description: - - The groups to add the hostname to, comma separated. - required: false -author: Seth Vidal -''' - -EXAMPLES = ''' -# add host to group 'just_created' with variable foo=42 -- add_host: name={{ ip_from_ec2 }} groups=just_created foo=42 - -# add a host with a non-standard port local to your machines -- add_host: name={{ new_ip }}:{{ new_port }} - -# add a host alias that we reach through a tunnel -- add_host: hostname={{ new_ip }} - ansible_ssh_host={{ inventory_hostname }} - ansible_ssh_port={{ new_port }} -''' diff --git a/library/inventory/group_by b/library/inventory/group_by deleted file mode 100644 index d09552e662c..00000000000 --- a/library/inventory/group_by +++ /dev/null @@ -1,25 +0,0 @@ -# -*- mode: python -*- - -DOCUMENTATION = ''' ---- -module: group_by -short_description: Create Ansible groups based on facts -description: - - Use facts to create ad-hoc groups that can be used later in a playbook. -version_added: "0.9" -options: - key: - description: - - The variables whose values will be used as groups - required: true -author: Jeroen Hoekx -notes: - - Spaces in group names are converted to dashes '-'. 
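As a worked illustration of the key parameter and the space-to-dash note above, this is roughly how a templated key becomes a group name (plain str.format stands in for Jinja2 templating here; illustrative only, not module code):

```python
def to_group(key_template, facts):
    # Render the key against host facts, then apply the documented
    # space-to-dash substitution for group names.
    return key_template.format(**facts).replace(' ', '-')

facts = {'ansible_machine': 'x86_64'}
print(to_group('machine_{ansible_machine}', facts))  # -> machine_x86_64
```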
-''' - -EXAMPLES = ''' -# Create groups based on the machine architecture -- group_by: key=machine_{{ ansible_machine }} -# Create groups like 'kvm-host' -- group_by: key=virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }} -''' diff --git a/library/messaging/rabbitmq_parameter b/library/messaging/rabbitmq_parameter deleted file mode 100644 index 2f78bd4ee15..00000000000 --- a/library/messaging/rabbitmq_parameter +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Chatham Financial -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rabbitmq_parameter -short_description: Adds or removes parameters to RabbitMQ -description: - - Manage dynamic, cluster-wide parameters for RabbitMQ -version_added: "1.1" -author: Chris Hoffman -options: - component: - description: - - Name of the component of which the parameter is being set - required: true - default: null - name: - description: - - Name of the parameter being set - required: true - default: null - value: - description: - - Value of the parameter, as a JSON term - required: false - default: null - vhost: - description: - - vhost to apply access privileges. 
- required: false - default: / - node: - description: - - erlang node name of the rabbit we wish to configure - required: false - default: rabbit - version_added: "1.2" - state: - description: - - Specify if user is to be added or removed - required: false - default: present - choices: [ 'present', 'absent'] -''' - -EXAMPLES = """ -# Set the federation parameter 'local_username' to a value of 'guest' (in quotes) -- rabbitmq_parameter: component=federation - name=local-username - value='"guest"' - state=present -""" - -class RabbitMqParameter(object): - def __init__(self, module, component, name, value, vhost, node): - self.module = module - self.component = component - self.name = name - self.value = value - self.vhost = vhost - self.node = node - - self._value = None - - self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) - - def _exec(self, args, run_in_check_mode=False): - if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = [self._rabbitmqctl, '-q', '-n', self.node] - rc, out, err = self.module.run_command(cmd + args, check_rc=True) - return out.splitlines() - return list() - - def get(self): - parameters = self._exec(['list_parameters', '-p', self.vhost], True) - - for param_item in parameters: - component, name, value = param_item.split('\t') - - if component == self.component and name == self.name: - self._value = value - return True - return False - - def set(self): - self._exec(['set_parameter', '-p', self.vhost, self.component, self.name, self.value]) - - def delete(self): - self._exec(['clear_parameter', '-p', self.vhost, self.component, self.name]) - - def has_modifications(self): - return self.value != self._value - -def main(): - arg_spec = dict( - component=dict(required=True), - name=dict(required=True), - value=dict(default=None), - vhost=dict(default='/'), - state=dict(default='present', choices=['present', 'absent']), - node=dict(default='rabbit') - ) - module = AnsibleModule( - argument_spec=arg_spec, - supports_check_mode=True - ) - - component = module.params['component'] - name = module.params['name'] - value = module.params['value'] - vhost = module.params['vhost'] - state = module.params['state'] - node = module.params['node'] - - rabbitmq_parameter = RabbitMqParameter(module, component, name, value, vhost, node) - - changed = False - if rabbitmq_parameter.get(): - if state == 'absent': - rabbitmq_parameter.delete() - changed = True - else: - if rabbitmq_parameter.has_modifications(): - rabbitmq_parameter.set() - changed = True - elif state == 'present': - rabbitmq_parameter.set() - changed = True - - module.exit_json(changed=changed, component=component, name=name, vhost=vhost, state=state) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/messaging/rabbitmq_plugin b/library/messaging/rabbitmq_plugin deleted file mode 100644 index 53c38f978d5..00000000000 --- a/library/messaging/rabbitmq_plugin +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Chatham Financial -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rabbitmq_plugin -short_description: Adds or removes plugins to RabbitMQ -description: - - Enables or disables RabbitMQ plugins -version_added: "1.1" -author: Chris Hoffman -options: - names: - description: - - Comma-separated list of plugin names - required: true - default: null - aliases: [name] - new_only: - description: - - Only enable missing plugins - - Does not disable plugins that are not in the names list - required: false - default: "no" - choices: [ "yes", "no" ] - state: - description: - - Specify if plugins are to be enabled or disabled - required: false - default: enabled - choices: [enabled, disabled] - prefix: - description: - - Specify a custom install prefix to a Rabbit - required: false - version_added: "1.3" - default: null -''' - -EXAMPLES = ''' -# Enables the rabbitmq_management plugin -- rabbitmq_plugin: names=rabbitmq_management state=enabled -''' - -class RabbitMqPlugins(object): - def __init__(self, module): - self.module = module - - if module.params['prefix']: - self._rabbitmq_plugins = module.params['prefix'] + "/sbin/rabbitmq-plugins" - else: - self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True) - - def _exec(self, args, run_in_check_mode=False): - if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = [self._rabbitmq_plugins] - rc, out, err = self.module.run_command(cmd + args, check_rc=True) - return out.splitlines() - return list() - - def get_all(self): - return self._exec(['list', '-E', '-m'], True) - - def enable(self, name): - self._exec(['enable', name]) - - def disable(self, name): - self._exec(['disable', name]) - -def main(): - arg_spec = dict( - names=dict(required=True, aliases=['name']), - new_only=dict(default='no', type='bool'), - state=dict(default='enabled', choices=['enabled', 'disabled']), - prefix=dict(required=False, default=None) - ) - module = AnsibleModule( - argument_spec=arg_spec, - supports_check_mode=True - ) - - names = module.params['names'].split(',') - new_only = module.params['new_only'] - state = module.params['state'] - - rabbitmq_plugins = RabbitMqPlugins(module) - enabled_plugins = rabbitmq_plugins.get_all() - - enabled = [] - disabled = [] - if state == 'enabled': - if not new_only: - for plugin in enabled_plugins: - if plugin not in names: - rabbitmq_plugins.disable(plugin) - disabled.append(plugin) - - for name in names: - if name not in enabled_plugins: - rabbitmq_plugins.enable(name) - enabled.append(name) - else: - for plugin in enabled_plugins: - if plugin in names: - rabbitmq_plugins.disable(plugin) - disabled.append(plugin) - - changed = len(enabled) > 0 or len(disabled) > 0 - module.exit_json(changed=changed, enabled=enabled, disabled=disabled) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/messaging/rabbitmq_policy b/library/messaging/rabbitmq_policy deleted file mode 100644 index 800c3822d55..00000000000 --- a/library/messaging/rabbitmq_policy +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, John Dewey -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any 
later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: rabbitmq_policy -short_description: Manage the state of policies in RabbitMQ. -description: - - Manage the state of a virtual host in RabbitMQ. -version_added: "1.5" -author: John Dewey -options: - name: - description: - - The name of the policy to manage. - required: true - default: null - vhost: - description: - - The name of the vhost to apply to. - required: false - default: / - pattern: - description: - - A regex of queues to apply the policy to. - required: true - default: null - tags: - description: - - A dict or string describing the policy. - required: true - default: null - priority: - description: - - The priority of the policy. - required: false - default: 0 - node: - description: - - Erlang node name of the rabbit we wish to configure. - required: false - default: rabbit - state: - description: - - The state of the policy. - default: present - choices: [present, absent] -''' - -EXAMPLES = ''' -- name: ensure the default vhost contains the HA policy via a dict - rabbitmq_policy: name=HA pattern='.*' - args: - tags: - "ha-mode": all - -- name: ensure the default vhost contains the HA policy - rabbitmq_policy: name=HA pattern='.*' tags="ha-mode=all" -''' -class RabbitMqPolicy(object): - def __init__(self, module, name): - self._module = module - self._name = name - self._vhost = module.params['vhost'] - self._pattern = module.params['pattern'] - self._tags = module.params['tags'] - self._priority = module.params['priority'] - self._node = module.params['node'] - self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) - - def _exec(self, args, run_in_check_mode=False): - if not self._module.check_mode or (self._module.check_mode and run_in_check_mode): - cmd = [self._rabbitmqctl, '-q', '-n', self._node] - args.insert(1, '-p') - args.insert(2, self._vhost) - rc, out, err = self._module.run_command(cmd + args, check_rc=True) - return out.splitlines() - return list() - - def list(self): - policies = self._exec(['list_policies'], True) - - for policy in policies: - policy_name = policy.split('\t')[1] - if policy_name == self._name: - return True - return False - - def set(self): - import json - args = ['set_policy'] - args.append(self._name) - args.append(self._pattern) - args.append(json.dumps(self._tags)) - args.append('--priority') - args.append(self._priority) - return self._exec(args) - - def clear(self): - return self._exec(['clear_policy', self._name]) - - -def main(): - arg_spec = dict( - name=dict(required=True), - vhost=dict(default='/'), - pattern=dict(required=True), - tags=dict(type='dict', required=True), - priority=dict(default='0'), - node=dict(default='rabbit'), - state=dict(default='present', choices=['present', 'absent']), - ) - - module = AnsibleModule( - argument_spec=arg_spec, - supports_check_mode=True - ) - - name = module.params['name'] - state = module.params['state'] - rabbitmq_policy = RabbitMqPolicy(module, name) - - changed = False - if rabbitmq_policy.list(): - if state == 'absent': - rabbitmq_policy.clear() - changed = True - else: - changed = False - elif state == 'present': - rabbitmq_policy.set() - changed = True - - 
module.exit_json(changed=changed, name=name, state=state) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/messaging/rabbitmq_user b/library/messaging/rabbitmq_user deleted file mode 100644 index 1cbee360dff..00000000000 --- a/library/messaging/rabbitmq_user +++ /dev/null @@ -1,249 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Chatham Financial -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rabbitmq_user -short_description: Adds or removes users to RabbitMQ -description: - - Add or remove users to RabbitMQ and assign permissions -version_added: "1.1" -author: Chris Hoffman -options: - user: - description: - - Name of user to add - required: true - default: null - aliases: [username, name] - password: - description: - - Password of user to add. - - To change the password of an existing user, you must also specify - C(force=yes). - required: false - default: null - tags: - description: - - User tags specified as comma delimited - required: false - default: null - vhost: - description: - - vhost to apply access privileges. - required: false - default: / - node: - description: - - erlang node name of the rabbit we wish to configure - required: false - default: rabbit - version_added: "1.2" - configure_priv: - description: - - Regular expression to restrict configure actions on a resource - for the specified vhost. - - By default all actions are restricted. - required: false - default: ^$ - write_priv: - description: - - Regular expression to restrict configure actions on a resource - for the specified vhost. - - By default all actions are restricted. - required: false - default: ^$ - read_priv: - description: - - Regular expression to restrict configure actions on a resource - for the specified vhost. - - By default all actions are restricted. - required: false - default: ^$ - force: - description: - - Deletes and recreates the user. 
- required: false - default: "no" - choices: [ "yes", "no" ] - state: - description: - - Specify if user is to be added or removed - required: false - default: present - choices: [present, absent] -''' - -EXAMPLES = ''' -# Add user to server and assign full access control -- rabbitmq_user: user=joe - password=changeme - vhost=/ - configure_priv=.* - read_priv=.* - write_priv=.* - state=present -''' - -class RabbitMqUser(object): - def __init__(self, module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node): - self.module = module - self.username = username - self.password = password - self.node = node - if tags is None: - self.tags = list() - else: - self.tags = tags.split(',') - - permissions = dict( - vhost=vhost, - configure_priv=configure_priv, - write_priv=write_priv, - read_priv=read_priv - ) - self.permissions = permissions - - self._tags = None - self._permissions = None - self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) - - def _exec(self, args, run_in_check_mode=False): - if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = [self._rabbitmqctl, '-q', '-n', self.node] - rc, out, err = self.module.run_command(cmd + args, check_rc=True) - return out.splitlines() - return list() - - def get(self): - users = self._exec(['list_users'], True) - - for user_tag in users: - user, tags = user_tag.split('\t') - - if user == self.username: - for c in ['[',']',' ']: - tags = tags.replace(c, '') - - if tags != '': - self._tags = tags.split(',') - else: - self._tags = list() - - self._permissions = self._get_permissions() - return True - return False - - def _get_permissions(self): - perms_out = self._exec(['list_user_permissions', self.username], True) - - for perm in perms_out: - vhost, configure_priv, write_priv, read_priv = perm.split('\t') - if vhost == self.permissions['vhost']: - return dict(vhost=vhost, configure_priv=configure_priv, write_priv=write_priv, read_priv=read_priv) - - return dict() - - def add(self): - self._exec(['add_user', self.username, self.password]) - - def delete(self): - self._exec(['delete_user', self.username]) - - def set_tags(self): - self._exec(['set_user_tags', self.username] + self.tags) - - def set_permissions(self): - cmd = ['set_permissions'] - cmd.append('-p') - cmd.append(self.permissions['vhost']) - cmd.append(self.username) - cmd.append(self.permissions['configure_priv']) - cmd.append(self.permissions['write_priv']) - cmd.append(self.permissions['read_priv']) - self._exec(cmd) - - def has_tags_modifications(self): - return set(self.tags) != set(self._tags) - - def has_permissions_modifications(self): - return self._permissions != self.permissions - -def main(): - arg_spec = dict( - user=dict(required=True, aliases=['username', 'name']), - password=dict(default=None), - tags=dict(default=None), - vhost=dict(default='/'), - configure_priv=dict(default='^$'), - write_priv=dict(default='^$'), - read_priv=dict(default='^$'), - force=dict(default='no', type='bool'), - state=dict(default='present', choices=['present', 'absent']), - node=dict(default='rabbit') - ) - module = AnsibleModule( - argument_spec=arg_spec, - supports_check_mode=True - ) - - username = module.params['user'] - password = module.params['password'] - tags = module.params['tags'] - vhost = module.params['vhost'] - configure_priv = module.params['configure_priv'] - write_priv = module.params['write_priv'] - read_priv = module.params['read_priv'] - force = module.params['force'] - state = module.params['state'] - node 
= module.params['node'] - - rabbitmq_user = RabbitMqUser(module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node) - - changed = False - if rabbitmq_user.get(): - if state == 'absent': - rabbitmq_user.delete() - changed = True - else: - if force: - rabbitmq_user.delete() - rabbitmq_user.add() - rabbitmq_user.get() - changed = True - - if rabbitmq_user.has_tags_modifications(): - rabbitmq_user.set_tags() - changed = True - - if rabbitmq_user.has_permissions_modifications(): - rabbitmq_user.set_permissions() - changed = True - elif state == 'present': - rabbitmq_user.add() - rabbitmq_user.set_tags() - rabbitmq_user.set_permissions() - changed = True - - module.exit_json(changed=changed, user=username, state=state) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/messaging/rabbitmq_vhost b/library/messaging/rabbitmq_vhost deleted file mode 100644 index fd4b04a683f..00000000000 --- a/library/messaging/rabbitmq_vhost +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Chatham Financial -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: rabbitmq_vhost -short_description: Manage the state of a virtual host in RabbitMQ -description: - - Manage the state of a virtual host in RabbitMQ -version_added: "1.1" -author: Chris Hoffman -options: - name: - description: - - The name of the vhost to manage - required: true - default: null - aliases: [vhost] - node: - description: - - erlang node name of the rabbit we wish to configure - required: false - default: rabbit - version_added: "1.2" - tracing: - description: - - Enable/disable tracing for a vhost - default: "no" - choices: [ "yes", "no" ] - aliases: [trace] - state: - description: - - The state of vhost - default: present - choices: [present, absent] -''' - -EXAMPLES = ''' -# Ensure that the vhost /test exists. 
-- rabbitmq_vhost: name=/test state=present -''' - -class RabbitMqVhost(object): - def __init__(self, module, name, tracing, node): - self.module = module - self.name = name - self.tracing = tracing - self.node = node - - self._tracing = False - self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) - - def _exec(self, args, run_in_check_mode=False): - if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = [self._rabbitmqctl, '-q', '-n', self.node] - rc, out, err = self.module.run_command(cmd + args, check_rc=True) - return out.splitlines() - return list() - - def get(self): - vhosts = self._exec(['list_vhosts', 'name', 'tracing'], True) - - for vhost in vhosts: - name, tracing = vhost.split('\t') - if name == self.name: - self._tracing = self.module.boolean(tracing) - return True - return False - - def add(self): - return self._exec(['add_vhost', self.name]) - - def delete(self): - return self._exec(['delete_vhost', self.name]) - - def set_tracing(self): - if self.tracing != self._tracing: - if self.tracing: - self._enable_tracing() - else: - self._disable_tracing() - return True - return False - - def _enable_tracing(self): - return self._exec(['trace_on', '-p', self.name]) - - def _disable_tracing(self): - return self._exec(['trace_off', '-p', self.name]) - - -def main(): - arg_spec = dict( - name=dict(required=True, aliases=['vhost']), - tracing=dict(default='off', aliases=['trace'], type='bool'), - state=dict(default='present', choices=['present', 'absent']), - node=dict(default='rabbit'), - ) - - module = AnsibleModule( - argument_spec=arg_spec, - supports_check_mode=True - ) - - name = module.params['name'] - tracing = module.params['tracing'] - state = module.params['state'] - node = module.params['node'] - - rabbitmq_vhost = RabbitMqVhost(module, name, tracing, node) - - changed = False - if rabbitmq_vhost.get(): - if state == 'absent': - rabbitmq_vhost.delete() - changed = True - else: - if rabbitmq_vhost.set_tracing(): - changed = True - elif state == 'present': - rabbitmq_vhost.add() - rabbitmq_vhost.set_tracing() - changed = True - - module.exit_json(changed=changed, name=name, state=state) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/monitoring/airbrake_deployment b/library/monitoring/airbrake_deployment deleted file mode 100644 index e1c490b881b..00000000000 --- a/library/monitoring/airbrake_deployment +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Bruce Pennypacker -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: airbrake_deployment -version_added: "1.2" -author: Bruce Pennypacker -short_description: Notify airbrake about app deployments -description: - - Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking) -options: - token: - description: - - API token. 
- required: true - environment: - description: - - The airbrake environment name, typically 'production', 'staging', etc. - required: true - user: - description: - - The username of the person doing the deployment - required: false - repo: - description: - - URL of the project repository - required: false - revision: - description: - - A hash, number, tag, or other identifier showing what revision was deployed - required: false - url: - description: - - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit. - required: false - default: "https://airbrake.io/deploys" - version_added: "1.5" - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] -''' - -EXAMPLES = ''' -- airbrake_deployment: token=AAAAAA - environment='staging' - user='ansible' - revision=4.2 -''' - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True), - environment=dict(required=True), - user=dict(required=False), - repo=dict(required=False), - revision=dict(required=False), - url=dict(required=False, default='https://api.airbrake.io/deploys.txt'), - validate_certs=dict(default='yes', type='bool'), - ), - supports_check_mode=True - ) - - # build list of params - params = {} - - if module.params["environment"]: - params["deploy[rails_env]"] = module.params["environment"] - - if module.params["user"]: - params["deploy[local_username]"] = module.params["user"] - - if module.params["repo"]: - params["deploy[scm_repository]"] = module.params["repo"] - - if module.params["revision"]: - params["deploy[scm_revision]"] = module.params["revision"] - - params["api_key"] = module.params["token"] - - url = module.params.get('url') - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True) - - # Send the data to airbrake - data = urllib.urlencode(params) - response, info = fetch_url(module, url, data=data) - if info['status'] == 200: - module.exit_json(changed=True) - else: - module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url)) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() - diff --git a/library/monitoring/bigpanda b/library/monitoring/bigpanda deleted file mode 100644 index 11950287078..00000000000 --- a/library/monitoring/bigpanda +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python - -DOCUMENTATION = ''' ---- -module: bigpanda -author: BigPanda -short_description: Notify BigPanda about deployments -version_added: "1.8" -description: - - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls. -options: - component: - description: - - "The name of the component being deployed. Ex: billing" - required: true - alias: name - version: - description: - - The deployment version. - required: true - token: - description: - - API token. - required: true - state: - description: - - State of the deployment. - required: true - choices: ['started', 'finished', 'failed'] - hosts: - description: - - Name of affected host name. Can be a list. 
- required: false - default: machine's hostname - alias: host - env: - description: - - The environment name, typically 'production', 'staging', etc. - required: false - owner: - description: - - The person responsible for the deployment. - required: false - description: - description: - - Free text description of the deployment. - required: false - url: - description: - - Base URL of the API server. - required: False - default: https://api.bigpanda.io - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] -''' - -EXAMPLES = ''' -- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started -... -- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=finished - -or using a deployment object: -- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started - register: deployment - -- bigpanda: state=finished - args: deployment - -If outside servers aren't reachable from your machine, use local_action and pass the hostname: -- local_action: bigpanda component=myapp version=1.3 hosts={{ansible_hostname}} token={{ bigpanda_token }} state=started - register: deployment -... -- local_action: bigpanda state=finished - args: deployment -''' - -# =========================================== -# Module execution. -# -import socket - -def main(): - - module = AnsibleModule( - argument_spec=dict( - component=dict(required=True, aliases=['name']), - version=dict(required=True), - token=dict(required=True), - state=dict(required=True, choices=['started', 'finished', 'failed']), - hosts=dict(required=False, default=[socket.gethostname()], aliases=['host']), - env=dict(required=False), - owner=dict(required=False), - description=dict(required=False), - message=dict(required=False), - source_system=dict(required=False, default='ansible'), - validate_certs=dict(default='yes', type='bool'), - url=dict(required=False, default='https://api.bigpanda.io'), - ), - supports_check_mode=True, - check_invalid_arguments=False, - ) - - token = module.params['token'] - state = module.params['state'] - url = module.params['url'] - - # Build the common request body - body = dict() - for k in ('component', 'version', 'hosts'): - v = module.params[k] - if v is not None: - body[k] = v - - if not isinstance(body['hosts'], list): - body['hosts'] = [body['hosts']] - - # Insert state-specific attributes to body - if state == 'started': - for k in ('source_system', 'env', 'owner', 'description'): - v = module.params[k] - if v is not None: - body[k] = v - - request_url = url + '/data/events/deployments/start' - else: - message = module.params['message'] - if message is not None: - body['errorMessage'] = message - - if state == 'finished': - body['status'] = 'success' - else: - body['status'] = 'failure' - - request_url = url + '/data/events/deployments/end' - - # Build the deployment object we return - deployment = dict(token=token, url=url) - deployment.update(body) - if 'errorMessage' in deployment: - message = deployment.pop('errorMessage') - deployment['message'] = message - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True, **deployment) - - # Send the data to bigpanda - data = json.dumps(body) - headers = 
{'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'} - try: - response, info = fetch_url(module, request_url, data=data, headers=headers) - if info['status'] == 200: - module.exit_json(changed=True, **deployment) - else: - module.fail_json(msg=json.dumps(info)) - except Exception as e: - module.fail_json(msg=str(e)) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/monitoring/boundary_meter b/library/monitoring/boundary_meter deleted file mode 100644 index da739d4306f..00000000000 --- a/library/monitoring/boundary_meter +++ /dev/null @@ -1,256 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to add boundary meters. - -(c) 2013, curtis - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -import json -import datetime -import base64 -import os - -DOCUMENTATION = ''' - -module: boundary_meter -short_description: Manage boundary meters -description: - - This module manages boundary meters -version_added: "1.3" -author: curtis@serverascode.com -requirements: - - Boundary API access - - bprobe is required to send data, but not to register a meter - - Python urllib2 -options: - name: - description: - - meter name - required: true - state: - description: - - Whether to create or remove the client from boundary - required: false - default: true - choices: ["present", "absent"] - apiid: - description: - - Organizations boundary API ID - required: true - apikey: - description: - - Organizations boundary API KEY - required: true - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 - -notes: - - This module does not yet support boundary tags. - -''' - -EXAMPLES=''' -- name: Create meter - boundary_meter: apiid=AAAAAA api_key=BBBBBB state=present name={{ inventory_hostname }}" - -- name: Delete meter - boundary_meter: apiid=AAAAAA api_key=BBBBBB state=absent name={{ inventory_hostname }}" - -''' - -api_host = "api.boundary.com" -config_directory = "/etc/bprobe" - -# "resource" like thing or apikey? 
-def auth_encode(apikey): - auth = base64.standard_b64encode(apikey) - auth.replace("\n", "") - return auth - -def build_url(name, apiid, action, meter_id=None, cert_type=None): - if action == "create": - return 'https://%s/%s/meters' % (api_host, apiid) - elif action == "search": - return "https://%s/%s/meters?name=%s" % (api_host, apiid, name) - elif action == "certificates": - return "https://%s/%s/meters/%s/%s.pem" % (api_host, apiid, meter_id, cert_type) - elif action == "tags": - return "https://%s/%s/meters/%s/tags" % (api_host, apiid, meter_id) - elif action == "delete": - return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id) - -def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None): - - if meter_id is None: - url = build_url(name, apiid, action) - else: - if cert_type is None: - url = build_url(name, apiid, action, meter_id) - else: - url = build_url(name, apiid, action, meter_id, cert_type) - - headers = dict() - headers["Authorization"] = "Basic %s" % auth_encode(apikey) - headers["Content-Type"] = "application/json" - - return fetch_url(module, url, data=data, headers=headers) - -def create_meter(module, name, apiid, apikey): - - meters = search_meter(module, name, apiid, apikey) - - if len(meters) > 0: - # If the meter already exists, do nothing - module.exit_json(status="Meter " + name + " already exists",changed=False) - else: - # If it doesn't exist, create it - body = '{"name":"' + name + '"}' - response, info = http_request(module, name, apiid, apikey, data=body, action="create") - - if info['status'] != 200: - module.fail_json(msg="Failed to connect to api host to create meter") - - # If the config directory doesn't exist, create it - if not os.path.exists(config_directory): - try: - os.makedirs(config_directory) - except: - module.fail_json("Could not create " + config_directory) - - - # Download both cert files from the api host - types = ['key', 'cert'] - for cert_type in types: - try: - # If we can't open the file it's not there, so we should download it - cert_file = open('%s/%s.pem' % (config_directory,cert_type)) - except IOError: - # Now download the file... 
- rc = download_request(module, name, apiid, apikey, cert_type) - if rc == False: - module.fail_json(msg="Download request for " + cert_type + ".pem failed") - - return 0, "Meter " + name + " created" - -def search_meter(module, name, apiid, apikey): - - response, info = http_request(module, name, apiid, apikey, action="search") - - if info['status'] != 200: - module.fail_json(msg="Failed to connect to api host to search for meter") - - # Return meters - return json.loads(response.read()) - -def get_meter_id(module, name, apiid, apikey): - # In order to delete the meter we need its id - meters = search_meter(module, name, apiid, apikey) - - if len(meters) > 0: - return meters[0]['id'] - else: - return None - -def delete_meter(module, name, apiid, apikey): - - meter_id = get_meter_id(module, name, apiid, apikey) - - if meter_id is None: - return 1, "Meter does not exist, so can't delete it" - else: - response, info = http_request(module, name, apiid, apikey, action="delete", meter_id=meter_id) - if info['status'] != 200: - module.fail_json(msg="Failed to delete meter") - - # Each new meter gets a new key.pem and ca.pem file, so they should be deleted - types = ['cert', 'key'] - for cert_type in types: - try: - cert_file = '%s/%s.pem' % (config_directory,cert_type) - os.remove(cert_file) - except OSError, e: - module.fail_json(msg="Failed to remove " + cert_type + ".pem file") - - return 0, "Meter " + name + " deleted" - -def download_request(module, name, apiid, apikey, cert_type): - - meter_id = get_meter_id(module, name, apiid, apikey) - - if meter_id is not None: - action = "certificates" - response, info = http_request(module, name, apiid, apikey, action, meter_id=meter_id, cert_type=cert_type) - if info['status'] != 200: - module.fail_json(msg="Failed to connect to api host to download certificate") - - try: - cert_file_path = '%s/%s.pem' % (config_directory,cert_type) - body = response.read() - cert_file = open(cert_file_path, 'w') - cert_file.write(body) - cert_file.close() - os.chmod(cert_file_path, 0o600) - except: - module.fail_json(msg="Could not write to certificate file") - - return True - else: - module.fail_json(msg="Could not get meter id") - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, choices=['present', 'absent']), - name=dict(required=False), - apikey=dict(required=True), - apiid=dict(required=True), - validate_certs = dict(default='yes', type='bool'), - ) - ) - - state = module.params['state'] - name = module.params['name'] - apikey = module.params['apikey'] - apiid = module.params['apiid'] - - if state == "present": - (rc, result) = create_meter(module, name, apiid, apikey) - - if state == "absent": - (rc, result) = delete_meter(module, name, apiid, apikey) - - if rc != 0: - module.fail_json(msg=result) - - module.exit_json(status=result,changed=True) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() - diff --git a/library/monitoring/datadog_event b/library/monitoring/datadog_event deleted file mode 100644 index 5d38dd4c31d..00000000000 --- a/library/monitoring/datadog_event +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Author: Artūras 'arturaz' Šlajus -# -# This module is proudly sponsored by iGeolise (www.igeolise.com) and -# Tiny Lab Productions (www.tinylabproductions.com). - -DOCUMENTATION = ''' ---- -module: datadog_event -short_description: Posts events to DataDog service -description: -- "Allows you to post events to the DataDog (www.datadoghq.com) service."
-- "Uses http://docs.datadoghq.com/api/#events API." -version_added: "1.3" -author: Artūras 'arturaz' Šlajus -notes: [] -requirements: [urllib2] -options: - api_key: - description: ["Your DataDog API key."] - required: true - default: null - title: - description: ["The event title."] - required: true - default: null - text: - description: ["The body of the event."] - required: true - default: null - date_happened: - description: - - POSIX timestamp of the event. - - Default value is now. - required: false - default: now - priority: - description: ["The priority of the event."] - required: false - default: normal - choices: [normal, low] - tags: - description: ["Comma separated list of tags to apply to the event."] - required: false - default: null - alert_type: - description: ["Type of alert."] - required: false - default: info - choices: ['error', 'warning', 'info', 'success'] - aggregation_key: - description: ["An arbitrary string to use for aggregation."] - required: false - default: null - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 -''' - -EXAMPLES = ''' -# Post an event with low priority -datadog_event: title="Testing from ansible" text="Test!" priority="low" - api_key="6873258723457823548234234234" -# Post an event with several tags -datadog_event: title="Testing from ansible" text="Test!" - api_key="6873258723457823548234234234" - tags=aa,bb,cc -''' - -import socket - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True), - title=dict(required=True), - text=dict(required=True), - date_happened=dict(required=False, default=None, type='int'), - priority=dict( - required=False, default='normal', choices=['normal', 'low'] - ), - tags=dict(required=False, default=None), - alert_type=dict( - required=False, default='info', - choices=['error', 'warning', 'info', 'success'] - ), - aggregation_key=dict(required=False, default=None), - source_type_name=dict( - required=False, default='my apps', - choices=['nagios', 'hudson', 'jenkins', 'user', 'my apps', - 'feed', 'chef', 'puppet', 'git', 'bitbucket', 'fabric', - 'capistrano'] - ), - validate_certs = dict(default='yes', type='bool'), - ) - ) - - post_event(module) - -def post_event(module): - uri = "https://app.datadoghq.com/api/v1/events?api_key=%s" % module.params['api_key'] - - body = dict( - title=module.params['title'], - text=module.params['text'], - priority=module.params['priority'], - alert_type=module.params['alert_type'] - ) - if module.params['date_happened'] != None: - body['date_happened'] = module.params['date_happened'] - if module.params['tags'] != None: - body['tags'] = module.params['tags'].split(",") - if module.params['aggregation_key'] != None: - body['aggregation_key'] = module.params['aggregation_key'] - if module.params['source_type_name'] != None: - body['source_type_name'] = module.params['source_type_name'] - - json_body = module.jsonify(body) - headers = {"Content-Type": "application/json"} - - (response, info) = fetch_url(module, uri, data=json_body, headers=headers) - if info['status'] == 200: - response_body = response.read() - response_json = module.from_json(response_body) - if response_json['status'] == 'ok': - module.exit_json(changed=True) - else: - module.fail_json(msg=response) - else: - module.fail_json(**info) - -# import module snippets -from 
ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/monitoring/librato_annotation b/library/monitoring/librato_annotation deleted file mode 100644 index 63979f41bfb..00000000000 --- a/library/monitoring/librato_annotation +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (C) Seth Edwards, 2014 -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - - -import base64 - -DOCUMENTATION = ''' ---- -module: librato_annotation -short_description: create an annotation in librato -description: - - Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically -version_added: "1.6" -author: Seth Edwards -requirements: - - urllib2 - - base64 -options: - user: - description: - - Librato account username - required: true - api_key: - description: - - Librato account api key - required: true - name: - description: - - The annotation stream name - - If the annotation stream does not exist, it will be created automatically - required: false - title: - description: - - The title of an annotation is a string and may contain spaces - - The title should be a short, high-level summary of the annotation e.g. v45 Deployment - required: true - source: - description: - - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population - required: false - description: - description: - - The description contains extra meta-data about a particular annotation - - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo! 
-    required: false
-  start_time:
-    description:
-      - The unix timestamp indicating the time at which the event referenced by this annotation started
-    required: false
-  end_time:
-    description:
-      - The unix timestamp indicating the time at which the event referenced by this annotation ended
-      - For events that have a duration, this is a useful way to annotate the duration of the event
-    required: false
-  links:
-    description:
-      - See examples
-    required: true
-'''
-
-EXAMPLES = '''
-# Create a simple annotation event with a source
-- librato_annotation:
-    user: user@example.com
-    api_key: XXXXXXXXXXXXXXXXX
-    title: 'App Config Change'
-    source: 'foo.bar'
-    description: 'This is a detailed description of the config change'
-
-# Create an annotation that includes a link
-- librato_annotation:
-    user: user@example.com
-    api_key: XXXXXXXXXXXXXXXXXX
-    name: 'code.deploy'
-    title: 'app code deploy'
-    description: 'this is a detailed description of a deployment'
-    links:
-      - { rel: 'example', href: 'http://www.example.com/deploy' }
-
-# Create an annotation with a start_time and end_time
-- librato_annotation:
-    user: user@example.com
-    api_key: XXXXXXXXXXXXXXXXXX
-    name: 'maintenance'
-    title: 'Maintenance window'
-    description: 'This is a detailed description of maintenance'
-    start_time: 1395940006
-    end_time: 1395954406
-'''
-
-
-try:
-    import urllib2
-    HAS_URLLIB2 = True
-except ImportError:
-    HAS_URLLIB2 = False
-
-def post_annotation(module):
-    user = module.params['user']
-    api_key = module.params['api_key']
-    name = module.params['name']
-    title = module.params['title']
-
-    url = 'https://metrics-api.librato.com/v1/annotations/%s' % name
-    params = {}
-    params['title'] = title
-
-    if module.params['source'] is not None:
-        params['source'] = module.params['source']
-    if module.params['description'] is not None:
-        params['description'] = module.params['description']
-    if module.params['start_time'] is not None:
-        params['start_time'] = module.params['start_time']
-    if module.params['end_time'] is not None:
-        params['end_time'] = module.params['end_time']
-    if module.params['links'] is not None:
-        params['links'] = module.params['links']
-
-    json_body = module.jsonify(params)
-
-    headers = {}
-    headers['Content-Type'] = 'application/json'
-    headers['Authorization'] = "Basic " + base64.b64encode(user + ":" + api_key).strip()
-    req = urllib2.Request(url, json_body, headers)
-    try:
-        response = urllib2.urlopen(req)
-    except urllib2.HTTPError as e:
-        module.fail_json(msg="Request Failed", reason=e.reason)
-    response = response.read()
-    module.exit_json(changed=True, annotation=response)
-
-def main():
-
-    module = AnsibleModule(
-        argument_spec = dict(
-            user = dict(required=True),
-            api_key = dict(required=True),
-            name = dict(required=False),
-            title = dict(required=True),
-            source = dict(required=False),
-            description = dict(required=False),
-            start_time = dict(required=False, default=None, type='int'),
-            end_time = dict(required=False, default=None, type='int'),
-            links = dict(type='list')
-        )
-    )
-
-    if not HAS_URLLIB2:
-        module.fail_json(msg="urllib2 is required for this module")
-
-    post_annotation(module)
-
-from ansible.module_utils.basic import *
-main()
diff --git a/library/monitoring/logentries b/library/monitoring/logentries
deleted file mode 100644
index 373f4f777ff..00000000000
--- a/library/monitoring/logentries
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2013, Ivan Vanderbyl
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-module: logentries
-author: Ivan Vanderbyl
-short_description: Module for tracking logs via logentries.com
-description:
-    - Sends logs to LogEntries in realtime
-version_added: "1.6"
-options:
-    path:
-        description:
-            - path to a log file
-        required: true
-    state:
-        description:
-            - following state of the log
-        choices: [ 'present', 'absent' ]
-        required: false
-        default: present
-notes:
-    - Requires the LogEntries agent which can be installed following the instructions at logentries.com
-'''
-EXAMPLES = '''
-- logentries: path=/var/log/nginx/access.log state=present
-- logentries: path=/var/log/nginx/error.log state=absent
-'''

-def query_log_status(module, le_path, path, state="present"):
-    """ Returns whether a log is followed or not. """
-
-    if state == "present":
-        rc, out, err = module.run_command("%s followed %s" % (le_path, path))
-        if rc == 0:
-            return True
-
-        return False
-
-def follow_log(module, le_path, logs):
-    """ Follows one or more logs if not already followed. """
-
-    followed_count = 0
-
-    for log in logs:
-        if query_log_status(module, le_path, log):
-            continue
-
-        if module.check_mode:
-            module.exit_json(changed=True)
-        rc, out, err = module.run_command([le_path, 'follow', log])
-
-        if not query_log_status(module, le_path, log):
-            module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip()))
-
-        followed_count += 1
-
-    if followed_count > 0:
-        module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,))
-
-    module.exit_json(changed=False, msg="log(s) already followed")
-
-def unfollow_log(module, le_path, logs):
-    """ Unfollows one or more logs if followed. """
-
-    removed_count = 0
-
-    # Use a for loop so that, on error, we can report which log failed
-    for log in logs:
-        # Query the log first, to see if we even need to remove.
-        if not query_log_status(module, le_path, log):
-            continue
-
-        if module.check_mode:
-            module.exit_json(changed=True)
-        rc, out, err = module.run_command([le_path, 'rm', log])
-
-        if query_log_status(module, le_path, log):
-            module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip()))
-
-        removed_count += 1
-
-    if removed_count > 0:
-        module.exit_json(changed=True, msg="removed %d log(s)" % removed_count)
-
-    module.exit_json(changed=False, msg="log(s) already unfollowed")
-
-def main():
-    module = AnsibleModule(
-        argument_spec = dict(
-            path = dict(aliases=["name"], required=True),
-            state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"])
-        ),
-        supports_check_mode=True
-    )
-
-    le_path = module.get_bin_path('le', True, ['/usr/local/bin'])
-
-    p = module.params
-
-    # Handle multiple log files
-    logs = p["path"].split(",")
-    logs = filter(None, logs)
-
-    if p["state"] in ["present", "followed"]:
-        follow_log(module, le_path, logs)
-
-    elif p["state"] in ["absent", "unfollowed"]:
-        unfollow_log(module, le_path, logs)
-
-# import module snippets
-from ansible.module_utils.basic import *
-
-main()
diff --git a/library/monitoring/monit b/library/monitoring/monit
deleted file mode 100644
index 558f1e696f2..00000000000
--- a/library/monitoring/monit
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2013, Darryl Stoflet
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
-#
-
-DOCUMENTATION = '''
----
-module: monit
-short_description: Manage the state of a program monitored via Monit
-description:
-     - Manage the state of a program monitored via I(Monit)
-version_added: "1.2"
-options:
-  name:
-    description:
-      - The name of the I(monit) program/process to manage
-    required: true
-    default: null
-  state:
-    description:
-      - The state of service
-    required: true
-    default: null
-    choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
-requirements: [ ]
-author: Darryl Stoflet
-'''
-
-EXAMPLES = '''
-# Manage the state of program "httpd" to be in "started" state.
-- monit: name=httpd state=started -''' - -def main(): - arg_spec = dict( - name=dict(required=True), - state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded']) - ) - - module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) - - name = module.params['name'] - state = module.params['state'] - - MONIT = module.get_bin_path('monit', True) - - if state == 'reloaded': - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = module.run_command('%s reload' % MONIT) - if rc != 0: - module.fail_json(msg='monit reload failed', stdout=out, stderr=err) - module.exit_json(changed=True, name=name, state=state) - - def status(): - """Return the status of the process in monit, or the empty string if not present.""" - rc, out, err = module.run_command('%s summary' % MONIT, check_rc=True) - for line in out.split('\n'): - # Sample output lines: - # Process 'name' Running - # Process 'name' Running - restart pending - parts = line.lower().split() - if len(parts) > 2 and parts[0] == 'process' and parts[1] == "'%s'" % name: - return ' '.join(parts[2:]) - else: - return '' - - def run_command(command): - """Runs a monit command, and returns the new status.""" - module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True) - return status() - - present = status() != '' - - if not present and not state == 'present': - module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state) - - if state == 'present': - if not present: - if module.check_mode: - module.exit_json(changed=True) - status = run_command('reload') - if status == '': - module.fail_json(msg='%s process not configured with monit' % name, name=name, state=state) - else: - module.exit_json(changed=True, name=name, state=state) - module.exit_json(changed=False, name=name, state=state) - - running = 'running' in status() - - if running and state in ['started', 'monitored']: - module.exit_json(changed=False, name=name, state=state) - - if running and state == 'stopped': - if module.check_mode: - module.exit_json(changed=True) - status = run_command('stop') - if status in ['not monitored'] or 'stop pending' in status: - module.exit_json(changed=True, name=name, state=state) - module.fail_json(msg='%s process not stopped' % name, status=status) - - if running and state == 'unmonitored': - if module.check_mode: - module.exit_json(changed=True) - status = run_command('unmonitor') - if status in ['not monitored']: - module.exit_json(changed=True, name=name, state=state) - module.fail_json(msg='%s process not unmonitored' % name, status=status) - - elif state == 'restarted': - if module.check_mode: - module.exit_json(changed=True) - status = run_command('restart') - if status in ['initializing', 'running'] or 'restart pending' in status: - module.exit_json(changed=True, name=name, state=state) - module.fail_json(msg='%s process not restarted' % name, status=status) - - elif not running and state == 'started': - if module.check_mode: - module.exit_json(changed=True) - status = run_command('start') - if status in ['initializing', 'running'] or 'start pending' in status: - module.exit_json(changed=True, name=name, state=state) - module.fail_json(msg='%s process not started' % name, status=status) - - elif not running and state == 'monitored': - if module.check_mode: - module.exit_json(changed=True) - status = run_command('monitor') - if status not in ['not monitored']: - module.exit_json(changed=True, name=name, 
state=state)
-        module.fail_json(msg='%s process not monitored' % name, status=status)
-
-    module.exit_json(changed=False, name=name, state=state)
-
-# import module snippets
-from ansible.module_utils.basic import *
-
-main()
diff --git a/library/monitoring/nagios b/library/monitoring/nagios
deleted file mode 100644
index 9219766b86a..00000000000
--- a/library/monitoring/nagios
+++ /dev/null
@@ -1,880 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# This file is largely copied from the Nagios module included in the
-# Func project. Original copyright follows:
-#
-# func-nagios - Schedule downtime and enable/disable notifications
-# Copyright 2011, Red Hat, Inc.
-# Tim Bielawa
-#
-# This software may be freely redistributed under the terms of the GNU
-# general public license version 2.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-DOCUMENTATION = '''
----
-module: nagios
-short_description: Perform common tasks in Nagios related to downtime and notifications
-description:
-  - "The M(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
-  - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer to the host the playbook is currently running on.
-  - You can specify multiple services at once by separating them with commas, e.g., C(services=httpd,nfs,puppet).
-  - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime for the I(host itself), e.g., C(service=host). This keyword may not be given with other services at the same time. I(Setting alerts/downtime for a host does not affect alerts/downtime for any of the services running on it.) To schedule downtime for all services on a particular host, use the keyword "all", e.g., C(service=all).
-  - When using the M(nagios) module you will need to specify your Nagios server using the C(delegate_to) parameter.
-version_added: "0.7"
-options:
-  action:
-    description:
-      - Action to take.
-    required: true
-    default: null
-    choices: [ "downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
-               "silence_nagios", "unsilence_nagios", "command" ]
-  host:
-    description:
-      - Host to operate on in Nagios.
-    required: false
-    default: null
-  cmdfile:
-    description:
-      - Path to the nagios I(command file) (FIFO pipe).
-        Only required if auto-detection fails.
-    required: false
-    default: auto-detected
-  author:
-    description:
-      - Author to leave downtime comments as.
-        Only usable with the C(downtime) action.
-    required: false
-    default: Ansible
-  minutes:
-    description:
-      - Minutes to schedule downtime for.
-      - Only usable with the C(downtime) action.
-    required: false
-    default: 30
-  services:
-    description:
-      - What to manage downtime/alerts for. Separate multiple services with commas.
-        C(service) is an alias for C(services).
-        B(Required) option when using the C(downtime), C(enable_alerts), and C(disable_alerts) actions.
-    aliases: [ "service" ]
-    required: true
-    default: null
-  command:
-    description:
-      - The raw command to send to nagios, which
-        should not include the submitted time header or the line-feed.
-        B(Required) option when using the C(command) action.
- required: true - default: null - -author: Tim Bielawa -requirements: [ "Nagios" ] -''' - -EXAMPLES = ''' -# set 30 minutes of apache downtime -- nagios: action=downtime minutes=30 service=httpd host={{ inventory_hostname }} - -# schedule an hour of HOST downtime -- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }} - -# schedule downtime for ALL services on HOST -- nagios: action=downtime minutes=45 service=all host={{ inventory_hostname }} - -# schedule downtime for a few services -- nagios: action=downtime services=frob,foobar,qeuz host={{ inventory_hostname }} - -# enable SMART disk alerts -- nagios: action=enable_alerts service=smart host={{ inventory_hostname }} - -# "two services at once: disable httpd and nfs alerts" -- nagios: action=disable_alerts service=httpd,nfs host={{ inventory_hostname }} - -# disable HOST alerts -- nagios: action=disable_alerts service=host host={{ inventory_hostname }} - -# silence ALL alerts -- nagios: action=silence host={{ inventory_hostname }} - -# unsilence all alerts -- nagios: action=unsilence host={{ inventory_hostname }} - -# SHUT UP NAGIOS -- nagios: action=silence_nagios - -# ANNOY ME NAGIOS -- nagios: action=unsilence_nagios - -# command something -- nagios: action=command command='DISABLE_FAILURE_PREDICTION' -''' - -import ConfigParser -import types -import time -import os.path - -###################################################################### - - -def which_cmdfile(): - locations = [ - # rhel - '/etc/nagios/nagios.cfg', - # debian - '/etc/nagios3/nagios.cfg', - # older debian - '/etc/nagios2/nagios.cfg', - # bsd, solaris - '/usr/local/etc/nagios/nagios.cfg', - # groundwork it monitoring - '/usr/local/groundwork/nagios/etc/nagios.cfg', - # open monitoring distribution - '/omd/sites/oppy/tmp/nagios/nagios.cfg', - # ??? - '/usr/local/nagios/etc/nagios.cfg', - '/usr/local/nagios/nagios.cfg', - '/opt/nagios/etc/nagios.cfg', - '/opt/nagios/nagios.cfg', - # icinga on debian/ubuntu - '/etc/icinga/icinga.cfg', - # icinga installed from source (default location) - '/usr/local/icinga/etc/icinga.cfg', - ] - - for path in locations: - if os.path.exists(path): - for line in open(path): - if line.startswith('command_file'): - return line.split('=')[1].strip() - - return None - -###################################################################### - - -def main(): - ACTION_CHOICES = [ - 'downtime', - 'silence', - 'unsilence', - 'enable_alerts', - 'disable_alerts', - 'silence_nagios', - 'unsilence_nagios', - 'command', - ] - - module = AnsibleModule( - argument_spec=dict( - action=dict(required=True, default=None, choices=ACTION_CHOICES), - author=dict(default='Ansible'), - host=dict(required=False, default=None), - minutes=dict(default=30), - cmdfile=dict(default=which_cmdfile()), - services=dict(default=None, aliases=['service']), - command=dict(required=False, default=None), - ) - ) - - action = module.params['action'] - host = module.params['host'] - minutes = module.params['minutes'] - services = module.params['services'] - cmdfile = module.params['cmdfile'] - command = module.params['command'] - - ################################################################## - # Required args per action: - # downtime = (minutes, service, host) - # (un)silence = (host) - # (enable/disable)_alerts = (service, host) - # command = command - # - # AnsibleModule will verify most stuff, we need to verify - # 'minutes' and 'service' manually. 
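The comment above lists which arguments each action needs. A compact sketch of the same requirements expressed as a lookup table (a sketch only, not the module's code; the deleted checks follow):

REQUIRED_ARGS = {
    # action            -> params that must be set for that action
    "downtime":         ("host", "minutes", "services"),
    "enable_alerts":    ("host", "services"),
    "disable_alerts":   ("host", "services"),
    "silence":          ("host",),
    "unsilence":        ("host",),
    "command":          ("command",),
    "silence_nagios":   (),
    "unsilence_nagios": (),
}

def missing_args(action, params):
    """Return the required params that are unset for this action."""
    return [arg for arg in REQUIRED_ARGS.get(action, ()) if not params.get(arg)]

# e.g. missing_args("downtime", {"host": "web1", "minutes": 30}) -> ["services"]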
-
-    ##################################################################
-    if action not in ['command', 'silence_nagios', 'unsilence_nagios']:
-        if not host:
-            module.fail_json(msg='no host specified for action requiring one')
-    ######################################################################
-    if action == 'downtime':
-        # Make sure there's an actual service selected
-        if not services:
-            module.fail_json(msg='no service selected to set downtime for')
-        # Make sure minutes is a number
-        try:
-            m = int(minutes)
-            if not isinstance(m, types.IntType):
-                module.fail_json(msg='minutes must be a number')
-        except Exception:
-            module.fail_json(msg='invalid entry for minutes')
-
-    ##################################################################
-    if action in ['enable_alerts', 'disable_alerts']:
-        if not services:
-            module.fail_json(msg='a service is required when setting alerts')
-
-    if action in ['command']:
-        if not command:
-            module.fail_json(msg='no command passed for command action')
-    ##################################################################
-    if not cmdfile:
-        module.fail_json(msg='unable to locate nagios.cfg')
-
-    ##################################################################
-    ansible_nagios = Nagios(module, **module.params)
-    if module.check_mode:
-        module.exit_json(changed=True)
-    else:
-        ansible_nagios.act()
-    ##################################################################
-
-
-######################################################################
-class Nagios(object):
-    """
-    Perform common tasks in Nagios related to downtime and
-    notifications.
-
-    The complete set of external commands Nagios handles is documented
-    on their website:
-
-    http://old.nagios.org/developerinfo/externalcommands/commandlist.php
-
-    Note that in the case of `schedule_svc_downtime`,
-    `enable_svc_notifications`, and `disable_svc_notifications`, the
-    service argument should be passed as a list.
-    """
-
-    def __init__(self, module, **kwargs):
-        self.module = module
-        self.action = kwargs['action']
-        self.author = kwargs['author']
-        self.host = kwargs['host']
-        self.minutes = int(kwargs['minutes'])
-        self.cmdfile = kwargs['cmdfile']
-        self.command = kwargs['command']
-
-        if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'):
-            self.services = kwargs['services']
-        else:
-            self.services = kwargs['services'].split(',')
-
-        self.command_results = []
-
-    def _now(self):
-        """
-        The time in seconds since 12:00:00AM Jan 1, 1970
-        """
-
-        return int(time.time())
-
-    def _write_command(self, cmd):
-        """
-        Write the given command to the Nagios command file
-        """
-
-        try:
-            fp = open(self.cmdfile, 'w')
-            fp.write(cmd)
-            fp.flush()
-            fp.close()
-            self.command_results.append(cmd.strip())
-        except IOError:
-            self.module.fail_json(msg='unable to write to nagios command file',
-                                  cmdfile=self.cmdfile)
-
-    def _fmt_dt_str(self, cmd, host, duration, author=None,
-                    comment="Scheduling downtime", start=None,
-                    svc=None, fixed=1, trigger=0):
-        """
-        Format an external-command downtime string.
-
-        cmd - Nagios command ID
-        host - Host to schedule downtime on
-        duration - Minutes to schedule downtime for
-        author - Name to file the downtime as
-        comment - Reason for running this command (upgrade, reboot, etc)
-        start - Start of downtime in seconds since 12:00AM Jan 1 1970
-          Default is to use the entry time (now)
-        svc - Service to schedule downtime for, omit when for host downtime
-        fixed - Start now if 1, start when a problem is detected if 0
-        trigger - Optional ID of event to start downtime from. Leave as 0 for
-          fixed downtime.
-
-        Syntax: [submitted] COMMAND;<host>;[<svc>];<start>;<end>;<fixed>;
-        <trigger>;<duration>;<author>;<comment>
-        """
-
-        entry_time = self._now()
-        if start is None:
-            start = entry_time
-
-        hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
-        duration_s = (duration * 60)
-        end = start + duration_s
-
-        if not author:
-            author = self.author
-
-        if svc is not None:
-            dt_args = [svc, str(start), str(end), str(fixed), str(trigger),
-                       str(duration_s), author, comment]
-        else:
-            # Downtime for a host if no svc specified
-            dt_args = [str(start), str(end), str(fixed), str(trigger),
-                       str(duration_s), author, comment]
-
-        dt_arg_str = ";".join(dt_args)
-        dt_str = hdr + dt_arg_str + "\n"
-
-        return dt_str
-
-    def _fmt_notif_str(self, cmd, host=None, svc=None):
-        """
-        Format an external-command notification string.
-
-        cmd - Nagios command ID.
-        host - Host to en/disable notifications on. A value is not required
-          for global notification commands.
-        svc - Service to en/disable notifications for. A value is not required
-          for host notifications.
-
-        Syntax: [submitted] COMMAND;<host>[;<svc>]
-        """
-
-        entry_time = self._now()
-        notif_str = "[%s] %s" % (entry_time, cmd)
-        if host is not None:
-            notif_str += ";%s" % host
-
-            if svc is not None:
-                notif_str += ";%s" % svc
-
-        notif_str += "\n"
-
-        return notif_str
-
-    def schedule_svc_downtime(self, host, services=[], minutes=30):
-        """
-        This command is used to schedule downtime for a particular
-        service.
-
-        During the specified downtime, Nagios will not send
-        notifications out about the service.
-
-        Syntax: SCHEDULE_SVC_DOWNTIME;<host>;<service>;<start_time>;
-        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
-        """
-
-        cmd = "SCHEDULE_SVC_DOWNTIME"
-        for service in services:
-            dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, svc=service)
-            self._write_command(dt_cmd_str)
-
-    def schedule_host_downtime(self, host, minutes=30):
-        """
-        This command is used to schedule downtime for a particular
-        host.
-
-        During the specified downtime, Nagios will not send
-        notifications out about the host.
-
-        Syntax: SCHEDULE_HOST_DOWNTIME;<host>;<start_time>;<end_time>;
-        <fixed>;<trigger_id>;<duration>;<author>;<comment>
-        """
-
-        cmd = "SCHEDULE_HOST_DOWNTIME"
-        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
-        self._write_command(dt_cmd_str)
-
-    def schedule_host_svc_downtime(self, host, minutes=30):
-        """
-        This command is used to schedule downtime for
-        all services associated with a particular host.
-
-        During the specified downtime, Nagios will not send
-        notifications out about the host.
-
-        Syntax: SCHEDULE_HOST_SVC_DOWNTIME;<host>;<start_time>;<end_time>;
-        <fixed>;<trigger_id>;<duration>;<author>;<comment>
-        """
-
-        cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
-        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
-        self._write_command(dt_cmd_str)
-
-    def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30):
-        """
-        This command is used to schedule downtime for all hosts in a
-        particular hostgroup.
-
-        During the specified downtime, Nagios will not send
-        notifications out about the hosts.
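To make the formats above concrete, a small worked example of the strings the two formatters produce, with a fixed timestamp for illustration:

entry = 1417000000  # a fixed "now"; the module uses int(time.time())
duration_s = 30 * 60

# SCHEDULE_SVC_DOWNTIME;<host>;<svc>;<start>;<end>;<fixed>;<trigger>;<duration>;<author>;<comment>
dt_str = "[%d] SCHEDULE_SVC_DOWNTIME;web1;httpd;%d;%d;1;0;%d;Ansible;Scheduling downtime\n" % (
    entry, entry, entry + duration_s, duration_s)

# DISABLE_SVC_NOTIFICATIONS;<host>;<svc>
notif_str = "[%d] DISABLE_SVC_NOTIFICATIONS;web1;httpd\n" % entry

# Each string is appended to the command-file FIFO, exactly as _write_command() does above.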
- - Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;;; - ;;;;; - """ - - cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes) - self._write_command(dt_cmd_str) - - def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30): - """ - This command is used to schedule downtime for all services in - a particular hostgroup. - - During the specified downtime, Nagios will not send - notifications out about the services. - - Note that scheduling downtime for services does not - automatically schedule downtime for the hosts those services - are associated with. - - Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;;; - ;;;;; - """ - - cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes) - self._write_command(dt_cmd_str) - - def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30): - """ - This command is used to schedule downtime for all hosts in a - particular servicegroup. - - During the specified downtime, Nagios will not send - notifications out about the hosts. - - Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;; - ;;;;;; - - """ - - cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes) - self._write_command(dt_cmd_str) - - def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30): - """ - This command is used to schedule downtime for all services in - a particular servicegroup. - - During the specified downtime, Nagios will not send - notifications out about the services. - - Note that scheduling downtime for services does not - automatically schedule downtime for the hosts those services - are associated with. - - Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;; - ;;;;;; - - """ - - cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes) - self._write_command(dt_cmd_str) - - def disable_host_svc_notifications(self, host): - """ - This command is used to prevent notifications from being sent - out for all services on the specified host. - - Note that this command does not disable notifications from - being sent out about the host. - - Syntax: DISABLE_HOST_SVC_NOTIFICATIONS; - """ - - cmd = "DISABLE_HOST_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, host) - self._write_command(notif_str) - - def disable_host_notifications(self, host): - """ - This command is used to prevent notifications from being sent - out for the specified host. - - Note that this command does not disable notifications for - services associated with this host. - - Syntax: DISABLE_HOST_NOTIFICATIONS; - """ - - cmd = "DISABLE_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, host) - self._write_command(notif_str) - - def disable_svc_notifications(self, host, services=[]): - """ - This command is used to prevent notifications from being sent - out for the specified service. - - Note that this command does not disable notifications from - being sent out about the host. - - Syntax: DISABLE_SVC_NOTIFICATIONS;; - """ - - cmd = "DISABLE_SVC_NOTIFICATIONS" - for service in services: - notif_str = self._fmt_notif_str(cmd, host, svc=service) - self._write_command(notif_str) - - def disable_servicegroup_host_notifications(self, servicegroup): - """ - This command is used to prevent notifications from being sent - out for all hosts in the specified servicegroup. - - Note that this command does not disable notifications for - services associated with hosts in this service group. 
- - Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS; - """ - - cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, servicegroup) - self._write_command(notif_str) - - def disable_servicegroup_svc_notifications(self, servicegroup): - """ - This command is used to prevent notifications from being sent - out for all services in the specified servicegroup. - - Note that this does not prevent notifications from being sent - out about the hosts in this servicegroup. - - Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS; - """ - - cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, servicegroup) - self._write_command(notif_str) - - def disable_hostgroup_host_notifications(self, hostgroup): - """ - Disables notifications for all hosts in a particular - hostgroup. - - Note that this does not disable notifications for the services - associated with the hosts in the hostgroup - see the - DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that. - - Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS; - """ - - cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, hostgroup) - self._write_command(notif_str) - - def disable_hostgroup_svc_notifications(self, hostgroup): - """ - Disables notifications for all services associated with hosts - in a particular hostgroup. - - Note that this does not disable notifications for the hosts in - the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS - command for that. - - Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS; - """ - - cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, hostgroup) - self._write_command(notif_str) - - def enable_host_notifications(self, host): - """ - Enables notifications for a particular host. - - Note that this command does not enable notifications for - services associated with this host. - - Syntax: ENABLE_HOST_NOTIFICATIONS; - """ - - cmd = "ENABLE_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, host) - self._write_command(notif_str) - - def enable_host_svc_notifications(self, host): - """ - Enables notifications for all services on the specified host. - - Note that this does not enable notifications for the host. - - Syntax: ENABLE_HOST_SVC_NOTIFICATIONS; - """ - - cmd = "ENABLE_HOST_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, host) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def enable_svc_notifications(self, host, services=[]): - """ - Enables notifications for a particular service. - - Note that this does not enable notifications for the host. - - Syntax: ENABLE_SVC_NOTIFICATIONS;; - """ - - cmd = "ENABLE_SVC_NOTIFICATIONS" - nagios_return = True - return_str_list = [] - for service in services: - notif_str = self._fmt_notif_str(cmd, host, svc=service) - nagios_return = self._write_command(notif_str) and nagios_return - return_str_list.append(notif_str) - - if nagios_return: - return return_str_list - else: - return "Fail: could not write to the command file" - - def enable_hostgroup_host_notifications(self, hostgroup): - """ - Enables notifications for all hosts in a particular hostgroup. - - Note that this command does not enable notifications for - services associated with the hosts in this hostgroup. 
- - Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS; - """ - - cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, hostgroup) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def enable_hostgroup_svc_notifications(self, hostgroup): - """ - Enables notifications for all services that are associated - with hosts in a particular hostgroup. - - Note that this does not enable notifications for the hosts in - this hostgroup. - - Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS; - """ - - cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, hostgroup) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def enable_servicegroup_host_notifications(self, servicegroup): - """ - Enables notifications for all hosts that have services that - are members of a particular servicegroup. - - Note that this command does not enable notifications for - services associated with the hosts in this servicegroup. - - Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS; - """ - - cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, servicegroup) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def enable_servicegroup_svc_notifications(self, servicegroup): - """ - Enables notifications for all services that are members of a - particular servicegroup. - - Note that this does not enable notifications for the hosts in - this servicegroup. - - Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS; - """ - - cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, servicegroup) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def silence_host(self, host): - """ - This command is used to prevent notifications from being sent - out for the host and all services on the specified host. - - This is equivalent to calling disable_host_svc_notifications - and disable_host_notifications. - - Syntax: DISABLE_HOST_SVC_NOTIFICATIONS; - Syntax: DISABLE_HOST_NOTIFICATIONS; - """ - - cmd = [ - "DISABLE_HOST_SVC_NOTIFICATIONS", - "DISABLE_HOST_NOTIFICATIONS" - ] - nagios_return = True - return_str_list = [] - for c in cmd: - notif_str = self._fmt_notif_str(c, host) - nagios_return = self._write_command(notif_str) and nagios_return - return_str_list.append(notif_str) - - if nagios_return: - return return_str_list - else: - return "Fail: could not write to the command file" - - def unsilence_host(self, host): - """ - This command is used to enable notifications for the host and - all services on the specified host. - - This is equivalent to calling enable_host_svc_notifications - and enable_host_notifications. 
- - Syntax: ENABLE_HOST_SVC_NOTIFICATIONS; - Syntax: ENABLE_HOST_NOTIFICATIONS; - """ - - cmd = [ - "ENABLE_HOST_SVC_NOTIFICATIONS", - "ENABLE_HOST_NOTIFICATIONS" - ] - nagios_return = True - return_str_list = [] - for c in cmd: - notif_str = self._fmt_notif_str(c, host) - nagios_return = self._write_command(notif_str) and nagios_return - return_str_list.append(notif_str) - - if nagios_return: - return return_str_list - else: - return "Fail: could not write to the command file" - - def silence_nagios(self): - """ - This command is used to disable notifications for all hosts and services - in nagios. - - This is a 'SHUT UP, NAGIOS' command - """ - cmd = 'DISABLE_NOTIFICATIONS' - self._write_command(self._fmt_notif_str(cmd)) - - def unsilence_nagios(self): - """ - This command is used to enable notifications for all hosts and services - in nagios. - - This is a 'OK, NAGIOS, GO'' command - """ - cmd = 'ENABLE_NOTIFICATIONS' - self._write_command(self._fmt_notif_str(cmd)) - - def nagios_cmd(self, cmd): - """ - This sends an arbitrary command to nagios - - It prepends the submitted time and appends a \n - - You just have to provide the properly formatted command - """ - - pre = '[%s]' % int(time.time()) - - post = '\n' - cmdstr = '%s %s %s' % (pre, cmd, post) - self._write_command(cmdstr) - - def act(self): - """ - Figure out what you want to do from ansible, and then do the - needful (at the earliest). - """ - # host or service downtime? - if self.action == 'downtime': - if self.services == 'host': - self.schedule_host_downtime(self.host, self.minutes) - elif self.services == 'all': - self.schedule_host_svc_downtime(self.host, self.minutes) - else: - self.schedule_svc_downtime(self.host, - services=self.services, - minutes=self.minutes) - - # toggle the host AND service alerts - elif self.action == 'silence': - self.silence_host(self.host) - - elif self.action == 'unsilence': - self.unsilence_host(self.host) - - # toggle host/svc alerts - elif self.action == 'enable_alerts': - if self.services == 'host': - self.enable_host_notifications(self.host) - else: - self.enable_svc_notifications(self.host, - services=self.services) - - elif self.action == 'disable_alerts': - if self.services == 'host': - self.disable_host_notifications(self.host) - else: - self.disable_svc_notifications(self.host, - services=self.services) - elif self.action == 'silence_nagios': - self.silence_nagios() - - elif self.action == 'unsilence_nagios': - self.unsilence_nagios() - - elif self.action == 'command': - self.nagios_cmd(self.command) - - # wtf? - else: - self.module.fail_json(msg="unknown action specified: '%s'" % \ - self.action) - - self.module.exit_json(nagios_commands=self.command_results, - changed=True) - -###################################################################### -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/monitoring/newrelic_deployment b/library/monitoring/newrelic_deployment deleted file mode 100644 index 93d55832fd3..00000000000 --- a/library/monitoring/newrelic_deployment +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Matt Coddington -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: newrelic_deployment -version_added: "1.2" -author: Matt Coddington -short_description: Notify newrelic about app deployments -description: - - Notify newrelic about app deployments (see http://newrelic.github.io/newrelic_api/NewRelicApi/Deployment.html) -options: - token: - description: - - API token. - required: true - app_name: - description: - - (one of app_name or application_id are required) The value of app_name in the newrelic.yml file used by the application - required: false - application_id: - description: - - (one of app_name or application_id are required) The application id, found in the URL when viewing the application in RPM - required: false - changelog: - description: - - A list of changes for this deployment - required: false - description: - description: - - Text annotation for the deployment - notes for you - required: false - revision: - description: - - A revision number (e.g., git commit SHA) - required: false - user: - description: - - The name of the user/process that triggered this deployment - required: false - appname: - description: - - Name of the application - required: false - environment: - description: - - The environment for this deployment - required: false - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 - -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] -''' - -EXAMPLES = ''' -- newrelic_deployment: token=AAAAAA - app_name=myapp - user='ansible deployment' - revision=1.0 -''' - -# =========================================== -# Module execution. 
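As a standalone illustration of the request main() makes below, the deployments endpoint accepts a form-encoded POST with the API key in an x-api-key header. A hedged sketch with placeholder values, mirroring the fetch_url() call in the deleted code:

import urllib
import urllib2

token = "YOUR_NEWRELIC_API_KEY"  # placeholder, not a real key
data = urllib.urlencode({
    "app_name": "myapp",
    "revision": "1.0",
    "user": "ansible deployment",
})
req = urllib2.Request("https://rpm.newrelic.com/deployments.xml", data,
                      {"x-api-key": token})
urllib2.urlopen(req)  # the module treats HTTP 200/201 as success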
-# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True), - app_name=dict(required=False), - application_id=dict(required=False), - changelog=dict(required=False), - description=dict(required=False), - revision=dict(required=False), - user=dict(required=False), - appname=dict(required=False), - environment=dict(required=False), - validate_certs = dict(default='yes', type='bool'), - ), - supports_check_mode=True - ) - - # build list of params - params = {} - if module.params["app_name"] and module.params["application_id"]: - module.fail_json(msg="only one of 'app_name' or 'application_id' can be set") - - if module.params["app_name"]: - params["app_name"] = module.params["app_name"] - elif module.params["application_id"]: - params["application_id"] = module.params["application_id"] - else: - module.fail_json(msg="you must set one of 'app_name' or 'application_id'") - - for item in [ "changelog", "description", "revision", "user", "appname", "environment" ]: - if module.params[item]: - params[item] = module.params[item] - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True) - - # Send the data to NewRelic - url = "https://rpm.newrelic.com/deployments.xml" - data = urllib.urlencode(params) - headers = { - 'x-api-key': module.params["token"], - } - response, info = fetch_url(module, url, data=data, headers=headers) - if info['status'] in (200, 201): - module.exit_json(changed=True) - else: - module.fail_json(msg="unable to update newrelic: %s" % info['msg']) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() - diff --git a/library/monitoring/pagerduty b/library/monitoring/pagerduty deleted file mode 100644 index 5ca33717dc9..00000000000 --- a/library/monitoring/pagerduty +++ /dev/null @@ -1,232 +0,0 @@ -#!/usr/bin/python - -DOCUMENTATION = ''' - -module: pagerduty -short_description: Create PagerDuty maintenance windows -description: - - This module will let you create PagerDuty maintenance windows -version_added: "1.2" -author: Justin Johns -requirements: - - PagerDuty API access -options: - state: - description: - - Create a maintenance window or get a list of ongoing windows. - required: true - default: null - choices: [ "running", "started", "ongoing" ] - aliases: [] - name: - description: - - PagerDuty unique subdomain. - required: true - default: null - choices: [] - aliases: [] - user: - description: - - PagerDuty user ID. - required: true - default: null - choices: [] - aliases: [] - passwd: - description: - - PagerDuty user password. - required: true - default: null - choices: [] - aliases: [] - token: - description: - - A pagerduty token, generated on the pagerduty site. Can be used instead of - user/passwd combination. - required: true - default: null - choices: [] - aliases: [] - version_added: '1.8' - requester_id: - description: - - ID of user making the request. Only needed when using a token and creating a maintenance_window. - required: true - default: null - choices: [] - aliases: [] - version_added: '1.8' - service: - description: - - PagerDuty service ID. - required: false - default: null - choices: [] - aliases: [] - hours: - description: - - Length of maintenance window in hours. - required: false - default: 1 - choices: [] - aliases: [] - minutes: - description: - - Maintenance window in minutes (this is added to the hours). 
- required: false - default: 0 - choices: [] - aliases: [] - version_added: '1.8' - desc: - description: - - Short description of maintenance window. - required: false - default: Created by Ansible - choices: [] - aliases: [] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 - -notes: - - This module does not yet have support to end maintenance windows. -''' - -EXAMPLES=''' -# List ongoing maintenance windows using a user/passwd -- pagerduty: name=companyabc user=example@example.com passwd=password123 state=ongoing - -# List ongoing maintenance windows using a token -- pagerduty: name=companyabc token=xxxxxxxxxxxxxx state=ongoing - -# Create a 1 hour maintenance window for service FOO123, using a user/passwd -- pagerduty: name=companyabc - user=example@example.com - passwd=password123 - state=running - service=FOO123 - -# Create a 5 minute maintenance window for service FOO123, using a token -- pagerduty: name=companyabc - token=xxxxxxxxxxxxxx - hours=0 - minutes=5 - state=running - service=FOO123 - - -# Create a 4 hour maintenance window for service FOO123 with the description "deployment". -- pagerduty: name=companyabc - user=example@example.com - passwd=password123 - state=running - service=FOO123 - hours=4 - desc=deployment -''' - -import json -import datetime -import base64 - -def auth_header(user, passwd, token): - if token: - return "Token token=%s" % token - - auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '') - return "Basic %s" % auth - -def ongoing(module, name, user, passwd, token): - url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing" - headers = {"Authorization": auth_header(user, passwd, token)} - - response, info = fetch_url(module, url, headers=headers) - if info['status'] != 200: - module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg']) - - return False, response.read() - - -def create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc): - now = datetime.datetime.utcnow() - later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes)) - start = now.strftime("%Y-%m-%dT%H:%M:%SZ") - end = later.strftime("%Y-%m-%dT%H:%M:%SZ") - - url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows" - headers = { - 'Authorization': auth_header(user, passwd, token), - 'Content-Type' : 'application/json', - } - request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': [service]}} - if requester_id: - request_data['requester_id'] = requester_id - else: - if token: - module.fail_json(msg="requester_id is required when using a token") - - data = json.dumps(request_data) - response, info = fetch_url(module, url, data=data, headers=headers, method='POST') - if info['status'] != 200: - module.fail_json(msg="failed to create the window: %s" % info['msg']) - - return False, response.read() - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, choices=['running', 'started', 'ongoing']), - name=dict(required=True), - user=dict(required=False), - passwd=dict(required=False), - token=dict(required=False), - service=dict(required=False), - requester_id=dict(required=False), - hours=dict(default='1', required=False), - minutes=dict(default='0', required=False), - 
desc=dict(default='Created by Ansible', required=False),
-            validate_certs = dict(default='yes', type='bool'),
-        )
-    )
-
-    state = module.params['state']
-    name = module.params['name']
-    user = module.params['user']
-    passwd = module.params['passwd']
-    token = module.params['token']
-    service = module.params['service']
-    hours = module.params['hours']
-    minutes = module.params['minutes']
-    desc = module.params['desc']
-    requester_id = module.params['requester_id']
-
-    # either a token, or a user/passwd pair, must be supplied
-    if not token and not (user and passwd):
-        module.fail_json(msg="neither user and passwd nor token specified")
-
-    if state == "running" or state == "started":
-        if not service:
-            module.fail_json(msg="service not specified")
-        (rc, out) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc)
-
-    if state == "ongoing":
-        (rc, out) = ongoing(module, name, user, passwd, token)
-
-    if rc != 0:
-        module.fail_json(msg="failed", result=out)
-
-    module.exit_json(msg="success", result=out)
-
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-
-main()
diff --git a/library/monitoring/pingdom b/library/monitoring/pingdom
deleted file mode 100644
index 6f658cd9505..00000000000
--- a/library/monitoring/pingdom
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/python
-
-DOCUMENTATION = '''
-
-module: pingdom
-short_description: Pause/unpause Pingdom alerts
-description:
-    - This module will let you pause/unpause Pingdom alerts
-version_added: "1.2"
-author: Justin Johns
-requirements:
-    - "This pingdom python library: https://github.com/mbabineau/pingdom-python"
-options:
-    state:
-        description:
-            - Define whether or not the check should be running or paused.
-        required: true
-        default: null
-        choices: [ "running", "paused" ]
-        aliases: []
-    checkid:
-        description:
-            - Pingdom ID of the check.
-        required: true
-        default: null
-        choices: []
-        aliases: []
-    uid:
-        description:
-            - Pingdom user ID.
-        required: true
-        default: null
-        choices: []
-        aliases: []
-    passwd:
-        description:
-            - Pingdom user password.
-        required: true
-        default: null
-        choices: []
-        aliases: []
-    key:
-        description:
-            - Pingdom API key.
-        required: true
-        default: null
-        choices: []
-        aliases: []
-notes:
-    - This module does not yet have support to add/remove checks.
-'''
-
-EXAMPLES = '''
-# Pause the check with the ID of 12345.
-- pingdom: uid=example@example.com
-           passwd=password123
-           key=apipassword123
-           checkid=12345
-           state=paused
-
-# Unpause the check with the ID of 12345.
-- pingdom: uid=example@example.com
-           passwd=password123
-           key=apipassword123
-           checkid=12345
-           state=running
-'''
-
-try:
-    import pingdom
-    HAS_PINGDOM = True
-except ImportError:
-    HAS_PINGDOM = False
-
-
-
-def pause(checkid, uid, passwd, key):
-
-    c = pingdom.PingdomConnection(uid, passwd, key)
-    c.modify_check(checkid, paused=True)
-    check = c.get_check(checkid)
-    name = check.name
-    result = check.status
-    #if result != "paused":             # api output buggy - accept raw exception for now
-    #    return (True, name, result)
-    return (False, name, result)
-
-
-def unpause(checkid, uid, passwd, key):
-
-    c = pingdom.PingdomConnection(uid, passwd, key)
-    c.modify_check(checkid, paused=False)
-    check = c.get_check(checkid)
-    name = check.name
-    result = check.status
-    #if result != "up":                 # api output buggy - accept raw exception for now
-    #    return (True, name, result)
-    return (False, name, result)
-
-
-def main():
-
-    module = AnsibleModule(
-        argument_spec=dict(
-            state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']),
-            checkid=dict(required=True),
-            uid=dict(required=True),
-            passwd=dict(required=True),
-            key=dict(required=True)
-        )
-    )
-
-    if not HAS_PINGDOM:
-        module.fail_json(msg="Missing required pingdom module (check docs)")
-
-    checkid = module.params['checkid']
-    state = module.params['state']
-    uid = module.params['uid']
-    passwd = module.params['passwd']
-    key = module.params['key']
-
-    if (state == "paused" or state == "stopped"):
-        (rc, name, result) = pause(checkid, uid, passwd, key)
-
-    if (state == "running" or state == "started"):
-        (rc, name, result) = unpause(checkid, uid, passwd, key)
-
-    if rc != 0:
-        module.fail_json(msg="failed to change check state", checkid=checkid, name=name, status=result)
-
-    module.exit_json(checkid=checkid, name=name, status=result)
-
-# import module snippets
-from ansible.module_utils.basic import *
-main()
diff --git a/library/monitoring/rollbar_deployment b/library/monitoring/rollbar_deployment
deleted file mode 100644
index 772e78fc5c2..00000000000
--- a/library/monitoring/rollbar_deployment
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014, Max Riveiro,
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-module: rollbar_deployment
-version_added: 1.6
-author: Max Riveiro
-short_description: Notify Rollbar about app deployments
-description:
-  - Notify Rollbar about app deployments
-    (see https://rollbar.com/docs/deploys_other/)
-options:
-  token:
-    description:
-      - Your project access token.
-    required: true
-  environment:
-    description:
-      - Name of the environment being deployed, e.g. 'production'.
-    required: true
-  revision:
-    description:
-      - Revision number/sha being deployed.
-    required: true
-  user:
-    description:
-      - User who deployed.
-    required: false
-  rollbar_user:
-    description:
-      - Rollbar username of the user who deployed.
-    required: false
-  comment:
-    description:
-      - Deploy comment (e.g.
what is being deployed). - required: false - url: - description: - - Optional URL to submit the notification to. - required: false - default: 'https://api.rollbar.com/api/1/deploy/' - validate_certs: - description: - - If C(no), SSL certificates for the target URL will not be validated. - This should only be used on personally controlled sites using - self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] -''' - -EXAMPLES = ''' -- rollbar_deployment: token=AAAAAA - environment='staging' - user='ansible' - revision=4.2 - rollbar_user='admin' - comment='Test Deploy' -''' - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True), - environment=dict(required=True), - revision=dict(required=True), - user=dict(required=False), - rollbar_user=dict(required=False), - comment=dict(required=False), - url=dict( - required=False, - default='https://api.rollbar.com/api/1/deploy/' - ), - validate_certs=dict(default='yes', type='bool'), - ), - supports_check_mode=True - ) - - if module.check_mode: - module.exit_json(changed=True) - - params = dict( - access_token=module.params['token'], - environment=module.params['environment'], - revision=module.params['revision'] - ) - - if module.params['user']: - params['local_username'] = module.params['user'] - - if module.params['rollbar_user']: - params['rollbar_username'] = module.params['rollbar_user'] - - if module.params['comment']: - params['comment'] = module.params['comment'] - - url = module.params.get('url') - - try: - data = urllib.urlencode(params) - response, info = fetch_url(module, url, data=data) - except Exception, e: - module.fail_json(msg='Unable to notify Rollbar: %s' % e) - else: - if info['status'] == 200: - module.exit_json(changed=True) - else: - module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url)) - -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/monitoring/stackdriver b/library/monitoring/stackdriver deleted file mode 100644 index c36964dd9d2..00000000000 --- a/library/monitoring/stackdriver +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -DOCUMENTATION = ''' - -module: stackdriver -short_description: Send code deploy and annotation events to Stackdriver -description: - - Send code deploy and annotation events to Stackdriver -version_added: "1.6" -author: Ben Whaley -options: - key: - description: - - API key. - required: true - default: null - event: - description: - - The type of event to send, either annotation or deploy - choices: ['annotation', 'deploy'] - required: false - default: null - revision_id: - description: - - The revision of the code that was deployed. Required for deploy events - required: false - default: null - deployed_by: - description: - - The person or robot responsible for deploying the code - required: false - default: "Ansible" - deployed_to: - description: - - "The environment code was deployed to. (i.e. development, staging, production)" - required: false - default: null - repository: - description: - - The repository (or project) deployed - required: false - default: null - msg: - description: - - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation. - required: false - default: null - annotated_by: - description: - - The person or robot to whom the annotation should be attributed.
- required: false - default: "Ansible" - level: - description: - - one of INFO/WARN/ERROR, defaults to INFO if not supplied.  May affect display. - choices: ['INFO', 'WARN', 'ERROR'] - required: false - default: 'INFO' - instance_id: - description: - - id of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown - required: false - default: null - event_epoch: - description: - - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this." - required: false - default: null -''' - -EXAMPLES = ''' -- stackdriver: key=AAAAAA event=deploy deployed_to=production deployed_by=leeroyjenkins repository=MyWebApp revision_id=abcd123 - -- stackdriver: key=AAAAAA event=annotation msg="Greetings from Ansible" annotated_by=leeroyjenkins level=WARN instance_id=i-abcd1234 -''' - -# =========================================== -# Stackdriver module specific support methods. -# -try: - import json -except ImportError: - import simplejson as json - -def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None): - """Send a deploy event to Stackdriver""" - deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent" - - params = {} - params['revision_id'] = revision_id - params['deployed_by'] = deployed_by - if deployed_to: - params['deployed_to'] = deployed_to - if repository: - params['repository'] = repository - - return do_send_request(module, deploy_api, params, key) - -def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None): - """Send an annotation event to Stackdriver""" - annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent" - - params = {} - params['message'] = msg - if annotated_by: - params['annotated_by'] = annotated_by - if level: - params['level'] = level - if instance_id: - params['instance_id'] = instance_id - if event_epoch: - params['event_epoch'] = event_epoch - - return do_send_request(module, annotation_api, params, key) - -def do_send_request(module, url, params, key): - data = json.dumps(params) - headers = { - 'Content-Type': 'application/json', - 'x-stackdriver-apikey': key - } - response, info = fetch_url(module, url, headers=headers, data=data, method='POST') - if info['status'] != 200: - module.fail_json(msg="Unable to send msg: %s" % info['msg']) - - -# =========================================== -# Module execution. 
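# --- illustrative sketch (not part of the module) ---
# do_send_request() above delegates the HTTP POST to Ansible's fetch_url().
# Outside a module, the same request can be reproduced with plain urllib2
# (Python 2, matching the era of this code); the endpoint and header names
# are taken from the functions above, and the key value is a placeholder:
#
#     import json, urllib2
#     req = urllib2.Request("https://event-gateway.stackdriver.com/v1/deployevent",
#                           json.dumps({'revision_id': 'abcd123'}))
#     req.add_header('Content-Type', 'application/json')
#     req.add_header('x-stackdriver-apikey', 'AAAAAA')
#     urllib2.urlopen(req)  # raises urllib2.HTTPError on a non-2xx status
# --- end sketch ---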
-# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - key=dict(required=True), - event=dict(required=True, choices=['deploy', 'annotation']), - msg=dict(), - revision_id=dict(), - annotated_by=dict(default='Ansible'), - level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']), - instance_id=dict(), - event_epoch=dict(), - deployed_by=dict(default='Ansible'), - deployed_to=dict(), - repository=dict(), - ), - supports_check_mode=True - ) - - key = module.params["key"] - event = module.params["event"] - - # Annotation params - msg = module.params["msg"] - annotated_by = module.params["annotated_by"] - level = module.params["level"] - instance_id = module.params["instance_id"] - event_epoch = module.params["event_epoch"] - - # Deploy params - revision_id = module.params["revision_id"] - deployed_by = module.params["deployed_by"] - deployed_to = module.params["deployed_to"] - repository = module.params["repository"] - - ################################################################## - # deploy requires revision_id - # annotation requires msg - # We verify these manually - ################################################################## - - if event == 'deploy': - if not revision_id: - module.fail_json(msg="revision_id required for deploy events") - try: - send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository) - except Exception, e: - module.fail_json(msg="unable to send deploy event: %s" % e) - - if event == 'annotation': - if not msg: - module.fail_json(msg="msg required for annotation events") - try: - send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch) - except Exception, e: - module.fail_json(msg="unable to send annotation event: %s" % e) - - changed = True - module.exit_json(changed=changed, deployed_by=deployed_by) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/monitoring/zabbix_maintenance b/library/monitoring/zabbix_maintenance deleted file mode 100644 index e27091e0739..00000000000 --- a/library/monitoring/zabbix_maintenance +++ /dev/null @@ -1,371 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Alexander Bulimov -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -DOCUMENTATION = ''' - -module: zabbix_maintenance -short_description: Create Zabbix maintenance windows -description: - - This module will let you create Zabbix maintenance windows. -version_added: "1.8" -author: Alexander Bulimov -requirements: - - zabbix-api python module -options: - state: - description: - - Create or remove a maintenance window. - required: true - default: null - choices: [ "present", "absent" ] - server_url: - description: - - URL of Zabbix server, with protocol (http or https). - C(url) is an alias for C(server_url). - required: true - default: null - aliases: [ "url" ] - login_user: - description: - - Zabbix user name.
- required: true - default: null - login_password: - description: - - Zabbix user password. - required: true - default: null - host_names: - description: - - Hosts to manage maintenance window for. - Separate multiple hosts with commas. - C(host_name) is an alias for C(host_names). - B(Required) option when C(state) is I(present) - and no C(host_groups) specified. - required: false - default: null - aliases: [ "host_name" ] - host_groups: - description: - - Host groups to manage maintenance window for. - Separate multiple groups with commas. - C(host_group) is an alias for C(host_groups). - B(Required) option when C(state) is I(present) - and no C(host_names) specified. - required: false - default: null - aliases: [ "host_group" ] - minutes: - description: - - Length of maintenance window in minutes. - required: false - default: 10 - name: - description: - - Unique name of maintenance window. - required: true - default: null - desc: - description: - - Short description of maintenance window. - required: false - default: Created by Ansible - collect_data: - description: - - Type of maintenance. With data collection, or without. - required: false - default: "true" -notes: - - Useful for setting hosts in maintenance mode before a big update, - and removing the maintenance window after the update. - - Module creates maintenance window from now() to now() + minutes, - so if Zabbix server's time and host's time are not synchronized, - the maintenance window will be offset accordingly. - - Install the required module with the 'pip install zabbix-api' command. - - Existence is checked only by maintenance window name. -''' - -EXAMPLES = ''' -# Create maintenance window named "Update of www1" -# for host www1.example.com for 90 minutes -- zabbix_maintenance: name="Update of www1" - host_name=www1.example.com - state=present - minutes=90 - server_url=https://monitoring.example.com - login_user=ansible - login_password=pAsSwOrD - -# Create maintenance window named "Mass update" -# for host www1.example.com and host groups Office and Dev -- zabbix_maintenance: name="Mass update" - host_name=www1.example.com - host_groups=Office,Dev - state=present - server_url=https://monitoring.example.com - login_user=ansible - login_password=pAsSwOrD - -# Create maintenance window named "update" -# for hosts www1.example.com and db1.example.com and without data collection.
-- zabbix_maintenance: name=update - host_names=www1.example.com,db1.example.com - state=present - collect_data=false - server_url=https://monitoring.example.com - login_user=ansible - login_password=pAsSwOrD - -# Remove maintenance window named "Test1" -- zabbix_maintenance: name=Test1 - state=absent - server_url=https://monitoring.example.com - login_user=ansible - login_password=pAsSwOrD -''' - -import datetime -import time - -try: - from zabbix_api import ZabbixAPI - HAS_ZABBIX_API = True -except ImportError: - HAS_ZABBIX_API = False - - -def create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc): - end_time = start_time + period - try: - zbx.maintenance.create( - { - "groupids": group_ids, - "hostids": host_ids, - "name": name, - "maintenance_type": maintenance_type, - "active_since": str(start_time), - "active_till": str(end_time), - "description": desc, - "timeperiods": [{ - "timeperiod_type": "0", - "start_date": str(start_time), - "period": str(period), - }] - } - ) - except BaseException as e: - return 1, None, str(e) - return 0, None, None - - -def get_maintenance_id(zbx, name): - try: - result = zbx.maintenance.get( - { - "filter": - { - "name": name, - } - } - ) - except BaseException as e: - return 1, None, str(e) - - maintenance_ids = [] - for res in result: - maintenance_ids.append(res["maintenanceid"]) - - return 0, maintenance_ids, None - - -def delete_maintenance(zbx, maintenance_id): - try: - zbx.maintenance.delete(maintenance_id) - except BaseException as e: - return 1, None, str(e) - return 0, None, None - - -def check_maintenance(zbx, name): - try: - result = zbx.maintenance.exists( - { - "name": name - } - ) - except BaseException as e: - return 1, None, str(e) - return 0, result, None - - -def get_group_ids(zbx, host_groups): - group_ids = [] - for group in host_groups: - try: - result = zbx.hostgroup.get( - { - "output": "extend", - "filter": - { - "name": group - } - } - ) - except BaseException as e: - return 1, None, str(e) - - if not result: - return 1, None, "Group id for group %s not found" % group - - group_ids.append(result[0]["groupid"]) - - return 0, group_ids, None - - -def get_host_ids(zbx, host_names): - host_ids = [] - for host in host_names: - try: - result = zbx.host.get( - { - "output": "extend", - "filter": - { - "name": host - } - } - ) - except BaseException as e: - return 1, None, str(e) - - if not result: - return 1, None, "Host id for host %s not found" % host - - host_ids.append(result[0]["hostid"]) - - return 0, host_ids, None - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, choices=['present', 'absent']), - server_url=dict(required=True, aliases=['url']), - host_names=dict(type='list', required=False, default=None, aliases=['host_name']), - minutes=dict(type='int', required=False, default=10), - host_groups=dict(type='list', required=False, default=None, aliases=['host_group']), - login_user=dict(required=True), - login_password=dict(required=True), - name=dict(required=True), - desc=dict(required=False, default="Created by Ansible"), - collect_data=dict(type='bool', required=False, default=True), - ), - supports_check_mode=True, - ) - - if not HAS_ZABBIX_API: - module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)") - - host_names = module.params['host_names'] - host_groups = module.params['host_groups'] - state =
module.params['state'] - login_user = module.params['login_user'] - login_password = module.params['login_password'] - minutes = module.params['minutes'] - name = module.params['name'] - desc = module.params['desc'] - server_url = module.params['server_url'] - collect_data = module.params['collect_data'] - if collect_data: - maintenance_type = 0 - else: - maintenance_type = 1 - - try: - zbx = ZabbixAPI(server_url) - zbx.login(login_user, login_password) - except BaseException as e: - module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) - - changed = False - - if state == "present": - - now = datetime.datetime.now() - start_time = time.mktime(now.timetuple()) - period = 60 * int(minutes) # N * 60 seconds - - if host_groups: - (rc, group_ids, error) = get_group_ids(zbx, host_groups) - if rc != 0: - module.fail_json(msg="Failed to get group_ids: %s" % error) - else: - group_ids = [] - - if host_names: - (rc, host_ids, error) = get_host_ids(zbx, host_names) - if rc != 0: - module.fail_json(msg="Failed to get host_ids: %s" % error) - else: - host_ids = [] - - (rc, exists, error) = check_maintenance(zbx, name) - if rc != 0: - module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error)) - - if not exists: - if not host_names and not host_groups: - module.fail_json(msg="At least one host_name or host_group must be defined for each created maintenance.") - - if module.check_mode: - changed = True - else: - (rc, _, error) = create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc) - if rc == 0: - changed = True - else: - module.fail_json(msg="Failed to create maintenance: %s" % error) - - if state == "absent": - - (rc, exists, error) = check_maintenance(zbx, name) - if rc != 0: - module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error)) - - if exists: - (rc, maintenance, error) = get_maintenance_id(zbx, name) - if rc != 0: - module.fail_json(msg="Failed to get maintenance id: %s" % error) - - if maintenance: - if module.check_mode: - changed = True - else: - (rc, _, error) = delete_maintenance(zbx, maintenance) - if rc == 0: - changed = True - else: - module.fail_json(msg="Failed to remove maintenance: %s" % error) - - module.exit_json(changed=changed) - -from ansible.module_utils.basic import * -main() diff --git a/library/net_infrastructure/a10_server b/library/net_infrastructure/a10_server deleted file mode 100644 index 65410536eef..00000000000 --- a/library/net_infrastructure/a10_server +++ /dev/null @@ -1,269 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to manage A10 Networks slb server objects -(c) 2014, Mischa Peters - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see .
-""" - -DOCUMENTATION = ''' ---- -module: a10_server -version_added: 1.8 -short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices -description: - - Manage slb server objects on A10 Networks devices via aXAPI -author: Mischa Peters -notes: - - Requires A10 Networks aXAPI 2.1 -options: - host: - description: - - hostname or ip of your A10 Networks device - required: true - default: null - aliases: [] - choices: [] - username: - description: - - admin account of your A10 Networks device - required: true - default: null - aliases: ['user', 'admin'] - choices: [] - password: - description: - - admin password of your A10 Networks device - required: true - default: null - aliases: ['pass', 'pwd'] - choices: [] - server_name: - description: - - slb server name - required: true - default: null - aliases: ['server'] - choices: [] - server_ip: - description: - - slb server IP address - required: false - default: null - aliases: ['ip', 'address'] - choices: [] - server_status: - description: - - slb virtual server status - required: false - default: enable - aliases: ['status'] - choices: ['enabled', 'disabled'] - server_ports: - description: - - A list of ports to create for the server. Each list item should be a - dictionary which specifies the C(port:) and C(protocol:), but can also optionally - specify the C(status:). See the examples below for details. This parameter is - required when C(state) is C(present). - required: false - default: null - aliases: [] - choices: [] - state: - description: - - create, update or remove slb server - required: false - default: present - aliases: [] - choices: ['present', 'absent'] -''' - -EXAMPLES = ''' -# Create a new server -- a10_server: - host: a10.mydomain.com - username: myadmin - password: mypassword - server: test - server_ip: 1.1.1.100 - server_ports: - - port_num: 8080 - protocol: tcp - - port_num: 8443 - protocol: TCP - -''' - -VALID_PORT_FIELDS = ['port_num', 'protocol', 'status'] - -def validate_ports(module, ports): - for item in ports: - for key in item: - if key not in VALID_PORT_FIELDS: - module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS))) - - # validate the port number is present and an integer - if 'port_num' in item: - try: - item['port_num'] = int(item['port_num']) - except: - module.fail_json(msg="port_num entries in the port definitions must be integers") - else: - module.fail_json(msg="port definitions must define the port_num field") - - # validate the port protocol is present, and convert it to - # the internal API integer value (and validate it) - if 'protocol' in item: - protocol = axapi_get_port_protocol(item['protocol']) - if not protocol: - module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_PORT_PROTOCOLS)) - else: - item['protocol'] = protocol - else: - module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_PORT_PROTOCOLS)) - - # convert the status to the internal API integer value - if 'status' in item: - item['status'] = axapi_enabled_disabled(item['status']) - else: - item['status'] = 1 - - -def main(): - argument_spec = a10_argument_spec() - argument_spec.update(url_argument_spec()) - argument_spec.update( - dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - server_name=dict(type='str', aliases=['server'], required=True), - server_ip=dict(type='str', aliases=['ip', 'address']), - server_status=dict(type='str', default='enabled', aliases=['status'], 
choices=['enabled', 'disabled']), - server_ports=dict(type='list', aliases=['port'], default=[]), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=False - ) - - host = module.params['host'] - username = module.params['username'] - password = module.params['password'] - state = module.params['state'] - write_config = module.params['write_config'] - slb_server = module.params['server_name'] - slb_server_ip = module.params['server_ip'] - slb_server_status = module.params['server_status'] - slb_server_ports = module.params['server_ports'] - - if slb_server is None: - module.fail_json(msg='server_name is required') - - axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host - session_url = axapi_authenticate(module, axapi_base_url, username, password) - - # validate the ports data structure - validate_ports(module, slb_server_ports) - - json_post = { - 'server': { - 'name': slb_server, - 'host': slb_server_ip, - 'status': axapi_enabled_disabled(slb_server_status), - 'port_list': slb_server_ports, - } - } - - slb_server_data = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server})) - slb_server_exists = not axapi_failure(slb_server_data) - - changed = False - if state == 'present': - if not slb_server_ip: - module.fail_json(msg='you must specify an IP address when creating a server') - - if not slb_server_exists: - result = axapi_call(module, session_url + '&method=slb.server.create', json.dumps(json_post)) - if axapi_failure(result): - module.fail_json(msg="failed to create the server: %s" % result['response']['err']['msg']) - changed = True - else: - def needs_update(src_ports, dst_ports): - ''' - Checks whether every port definition in src_ports is also - present in dst_ports with identical settings. If any port is - missing or differs, this function returns True, otherwise False.
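                Example (illustrative values, not from the original module;
                the integer protocol/status codes are the internal aXAPI
                values produced by validate_ports() above):

                    p = [{'port_num': 8080, 'protocol': 2, 'status': 1}]
                    needs_update(p, [])  # True: port 8080 missing from dst_ports
                    needs_update(p, p)   # False: every port found and identical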
- ''' - for src_port in src_ports: - found = False - different = False - for dst_port in dst_ports: - if src_port['port_num'] == dst_port['port_num']: - found = True - for valid_field in VALID_PORT_FIELDS: - if src_port[valid_field] != dst_port[valid_field]: - different = True - break - if found or different: - break - if not found or different: - return True - # every port from the src exists in the dst, and none of them were different - return False - - defined_ports = slb_server_data.get('server', {}).get('port_list', []) - - # we check for a needed update both ways, in case ports - # are missing from either the ones specified by the user - # or from those on the device - if needs_update(defined_ports, slb_server_ports) or needs_update(slb_server_ports, defined_ports): - result = axapi_call(module, session_url + '&method=slb.server.update', json.dumps(json_post)) - if axapi_failure(result): - module.fail_json(msg="failed to update the server: %s" % result['response']['err']['msg']) - changed = True - - # if we changed things, get the full info regarding - # the service group for the return data below - if changed: - result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server})) - else: - result = slb_server_data - elif state == 'absent': - if slb_server_exists: - result = axapi_call(module, session_url + '&method=slb.server.delete', json.dumps({'name': slb_server})) - changed = True - else: - result = dict(msg="the server was not present") - - # if the config has changed, save the config unless otherwise requested - if changed and write_config: - write_result = axapi_call(module, session_url + '&method=system.action.write_memory') - if axapi_failure(write_result): - module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg']) - - # log out of the session nicely and exit - axapi_call(module, session_url + '&method=session.close') - module.exit_json(changed=changed, content=result) - -# standard ansible module imports -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.a10 import * - -main() diff --git a/library/net_infrastructure/a10_service_group b/library/net_infrastructure/a10_service_group deleted file mode 100644 index 3627e2d12b8..00000000000 --- a/library/net_infrastructure/a10_service_group +++ /dev/null @@ -1,341 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to manage A10 Networks slb service-group objects -(c) 2014, Mischa Peters - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . 
-""" - -DOCUMENTATION = ''' ---- -module: a10_service_group -version_added: 1.8 -short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices -description: - - Manage slb service-group objects on A10 Networks devices via aXAPI -author: Mischa Peters -notes: - - Requires A10 Networks aXAPI 2.1 - - When a server doesn't exist and is added to the service-group the server will be created -options: - host: - description: - - hostname or ip of your A10 Networks device - required: true - default: null - aliases: [] - choices: [] - username: - description: - - admin account of your A10 Networks device - required: true - default: null - aliases: ['user', 'admin'] - choices: [] - password: - description: - - admin password of your A10 Networks device - required: true - default: null - aliases: ['pass', 'pwd'] - choices: [] - service_group: - description: - - slb service-group name - required: true - default: null - aliases: ['service', 'pool', 'group'] - choices: [] - service_group_protocol: - description: - - slb service-group protocol - required: false - default: tcp - aliases: ['proto', 'protocol'] - choices: ['tcp', 'udp'] - service_group_method: - description: - - slb service-group loadbalancing method - required: false - default: round-robin - aliases: ['method'] - choices: ['round-robin', 'weighted-rr', 'least-connection', 'weighted-least-connection', 'service-least-connection', 'service-weighted-least-connection', 'fastest-response', 'least-request', 'round-robin-strict', 'src-ip-only-hash', 'src-ip-hash'] - servers: - description: - - A list of servers to add to the service group. Each list item should be a - dictionary which specifies the C(server:) and C(port:), but can also optionally - specify the C(status:). See the examples below for details. - required: false - default: null - aliases: [] - choices: [] - write_config: - description: - - If C(yes), any changes will cause a write of the running configuration - to non-volatile memory. This will save I(all) configuration changes, - including those that may have been made manually or through other modules, - so care should be taken when specifying C(yes). - required: false - default: "no" - choices: ["yes", "no"] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled devices using self-signed certificates. 
- required: false - default: 'yes' - choices: ['yes', 'no'] - -''' - -EXAMPLES = ''' -# Create a new service-group -- a10_service_group: - host: a10.mydomain.com - username: myadmin - password: mypassword - service_group: sg-80-tcp - servers: - - server: foo1.mydomain.com - port: 8080 - - server: foo2.mydomain.com - port: 8080 - - server: foo3.mydomain.com - port: 8080 - - server: foo4.mydomain.com - port: 8080 - status: disabled - -''' - -VALID_SERVICE_GROUP_FIELDS = ['name', 'protocol', 'lb_method'] -VALID_SERVER_FIELDS = ['server', 'port', 'status'] - -def validate_servers(module, servers): - for item in servers: - for key in item: - if key not in VALID_SERVER_FIELDS: - module.fail_json(msg="invalid server field (%s), must be one of: %s" % (key, ','.join(VALID_SERVER_FIELDS))) - - # validate the server name is present - if 'server' not in item: - module.fail_json(msg="server definitions must define the server field") - - # validate the port number is present and an integer - if 'port' in item: - try: - item['port'] = int(item['port']) - except: - module.fail_json(msg="server port definitions must be integers") - else: - module.fail_json(msg="server definitions must define the port field") - - # convert the status to the internal API integer value - if 'status' in item: - item['status'] = axapi_enabled_disabled(item['status']) - else: - item['status'] = 1 - - -def main(): - argument_spec = a10_argument_spec() - argument_spec.update(url_argument_spec()) - argument_spec.update( - dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - service_group=dict(type='str', aliases=['service', 'pool', 'group'], required=True), - service_group_protocol=dict(type='str', default='tcp', aliases=['proto', 'protocol'], choices=['tcp', 'udp']), - service_group_method=dict(type='str', default='round-robin', - aliases=['method'], - choices=['round-robin', - 'weighted-rr', - 'least-connection', - 'weighted-least-connection', - 'service-least-connection', - 'service-weighted-least-connection', - 'fastest-response', - 'least-request', - 'round-robin-strict', - 'src-ip-only-hash', - 'src-ip-hash']), - servers=dict(type='list', aliases=['server', 'member'], default=[]), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=False - ) - - host = module.params['host'] - username = module.params['username'] - password = module.params['password'] - state = module.params['state'] - write_config = module.params['write_config'] - slb_service_group = module.params['service_group'] - slb_service_group_proto = module.params['service_group_protocol'] - slb_service_group_method = module.params['service_group_method'] - slb_servers = module.params['servers'] - - if slb_service_group is None: - module.fail_json(msg='service_group is required') - - axapi_base_url = 'https://' + host + '/services/rest/V2.1/?format=json' - load_balancing_methods = {'round-robin': 0, - 'weighted-rr': 1, - 'least-connection': 2, - 'weighted-least-connection': 3, - 'service-least-connection': 4, - 'service-weighted-least-connection': 5, - 'fastest-response': 6, - 'least-request': 7, - 'round-robin-strict': 8, - 'src-ip-only-hash': 14, - 'src-ip-hash': 15} - - if not slb_service_group_proto or slb_service_group_proto.lower() == 'tcp': - protocol = 2 - else: - protocol = 3 - - # validate the server data list structure - validate_servers(module, slb_servers) - - json_post = { - 'service_group': { - 'name': slb_service_group, - 'protocol': protocol, - 'lb_method': 
load_balancing_methods[slb_service_group_method], - } - } - - # first we authenticate to get a session id - session_url = axapi_authenticate(module, axapi_base_url, username, password) - - # then we check to see if the specified group exists - slb_result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group})) - slb_service_group_exist = not axapi_failure(slb_result) - - changed = False - if state == 'present': - # before creating/updating we need to validate that servers - # defined in the servers list exist to prevent errors - checked_servers = [] - for server in slb_servers: - result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': server['server']})) - if axapi_failure(result): - module.fail_json(msg="the server %s specified in the servers list does not exist" % server['server']) - checked_servers.append(server['server']) - - if not slb_service_group_exist: - result = axapi_call(module, session_url + '&method=slb.service_group.create', json.dumps(json_post)) - if axapi_failure(result): - module.fail_json(msg=result['response']['err']['msg']) - changed = True - else: - # check to see if the service group definition without the - # server members is different, and update that individually - # if it needs it - do_update = False - for field in VALID_SERVICE_GROUP_FIELDS: - if json_post['service_group'][field] != slb_result['service_group'][field]: - do_update = True - break - - if do_update: - result = axapi_call(module, session_url + '&method=slb.service_group.update', json.dumps(json_post)) - if axapi_failure(result): - module.fail_json(msg=result['response']['err']['msg']) - changed = True - - # next we pull the defined list of servers out of the returned - # results to make it a bit easier to iterate over - defined_servers = slb_result.get('service_group', {}).get('member_list', []) - - # next we add/update new member servers from the user-specified - # list if they're different or not on the target device - for server in slb_servers: - found = False - different = False - for def_server in defined_servers: - if server['server'] == def_server['server']: - found = True - for valid_field in VALID_SERVER_FIELDS: - if server[valid_field] != def_server[valid_field]: - different = True - break - if found or different: - break - # add or update as required - server_data = { - "name": slb_service_group, - "member": server, - } - if not found: - result = axapi_call(module, session_url + '&method=slb.service_group.member.create', json.dumps(server_data)) - changed = True - elif different: - result = axapi_call(module, session_url + '&method=slb.service_group.member.update', json.dumps(server_data)) - changed = True - - # finally, remove any servers that are on the target - # device but were not specified in the list given - for server in defined_servers: - found = False - for slb_server in slb_servers: - if server['server'] == slb_server['server']: - found = True - break - # remove if not found - server_data = { - "name": slb_service_group, - "member": server, - } - if not found: - result = axapi_call(module, session_url + '&method=slb.service_group.member.delete', json.dumps(server_data)) - changed = True - - # if we changed things, get the full info regarding - # the service group for the return data below - if changed: - result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group})) - else: - result = slb_result - elif state == 'absent': - if 
slb_service_group_exist: - result = axapi_call(module, session_url + '&method=slb.service_group.delete', json.dumps({'name': slb_service_group})) - changed = True - else: - result = dict(msg="the service group was not present") - - # if the config has changed, save the config unless otherwise requested - if changed and write_config: - write_result = axapi_call(module, session_url + '&method=system.action.write_memory') - if axapi_failure(write_result): - module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg']) - - # log out of the session nicely and exit - axapi_call(module, session_url + '&method=session.close') - module.exit_json(changed=changed, content=result) - -# standard ansible module imports -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.a10 import * - -main() diff --git a/library/net_infrastructure/a10_virtual_server b/library/net_infrastructure/a10_virtual_server deleted file mode 100644 index 3d807c098cf..00000000000 --- a/library/net_infrastructure/a10_virtual_server +++ /dev/null @@ -1,299 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to manage A10 Networks slb virtual server objects -(c) 2014, Mischa Peters - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: a10_virtual_server -version_added: 1.8 -short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices -description: - - Manage slb virtual server objects on A10 Networks devices via aXAPI -author: Mischa Peters -notes: - - Requires A10 Networks aXAPI 2.1 -requirements: - - urllib2 - - re -options: - host: - description: - - hostname or ip of your A10 Networks device - required: true - default: null - aliases: [] - choices: [] - username: - description: - - admin account of your A10 Networks device - required: true - default: null - aliases: ['user', 'admin'] - choices: [] - password: - description: - - admin password of your A10 Networks device - required: true - default: null - aliases: ['pass', 'pwd'] - choices: [] - virtual_server: - description: - - slb virtual server name - required: true - default: null - aliases: ['vip', 'virtual'] - choices: [] - virtual_server_ip: - description: - - slb virtual server ip address - required: true - default: null - aliases: ['ip', 'address'] - choices: [] - virtual_server_status: - description: - - slb virtual server status - required: false - default: enabled - aliases: ['status'] - choices: ['enabled', 'disabled'] - virtual_server_ports: - description: - - A list of ports to create for the virtual server. Each list item should be a - dictionary which specifies the C(port:) and C(protocol:), but can also optionally - specify the C(service_group:) as well as the C(status:). See the examples - below for details. This parameter is required when C(state) is C(present).
- required: false - write_config: - description: - - If C(yes), any changes will cause a write of the running configuration - to non-volatile memory. This will save I(all) configuration changes, - including those that may have been made manually or through other modules, - so care should be taken when specifying C(yes). - required: false - default: "no" - choices: ["yes", "no"] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled devices using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - -''' - -EXAMPLES = ''' -# Create a new virtual server -- a10_virtual_server: - host: a10.mydomain.com - username: myadmin - password: mypassword - virtual_server: vserver1 - virtual_server_ip: 1.1.1.1 - virtual_server_ports: - - port: 80 - protocol: TCP - service_group: sg-80-tcp - - port: 443 - protocol: HTTPS - service_group: sg-443-https - - port: 8080 - protocol: http - status: disabled - -''' - -VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status'] - -def validate_ports(module, ports): - for item in ports: - for key in item: - if key not in VALID_PORT_FIELDS: - module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS))) - - # validate the port number is present and an integer - if 'port' in item: - try: - item['port'] = int(item['port']) - except: - module.fail_json(msg="port definitions must be integers") - else: - module.fail_json(msg="port definitions must define the port field") - - # validate the port protocol is present, and convert it to - # the internal API integer value (and validate it) - if 'protocol' in item: - protocol = axapi_get_vport_protocol(item['protocol']) - if not protocol: - module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_VPORT_PROTOCOLS)) - else: - item['protocol'] = protocol - else: - module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_VPORT_PROTOCOLS)) - - # convert the status to the internal API integer value - if 'status' in item: - item['status'] = axapi_enabled_disabled(item['status']) - else: - item['status'] = 1 - - # ensure the service_group field is at least present - if 'service_group' not in item: - item['service_group'] = '' - -def main(): - argument_spec = a10_argument_spec() - argument_spec.update(url_argument_spec()) - argument_spec.update( - dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - virtual_server=dict(type='str', aliases=['vip', 'virtual'], required=True), - virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True), - virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']), - virtual_server_ports=dict(type='list', required=True), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=False - ) - - host = module.params['host'] - username = module.params['username'] - password = module.params['password'] - state = module.params['state'] - write_config = module.params['write_config'] - slb_virtual = module.params['virtual_server'] - slb_virtual_ip = module.params['virtual_server_ip'] - slb_virtual_status = module.params['virtual_server_status'] - slb_virtual_ports = module.params['virtual_server_ports'] - - if slb_virtual is None: - module.fail_json(msg='virtual_server is required') - - validate_ports(module, slb_virtual_ports) - - axapi_base_url = 
'https://%s/services/rest/V2.1/?format=json' % host - session_url = axapi_authenticate(module, axapi_base_url, username, password) - - slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual})) - slb_virtual_exists = not axapi_failure(slb_virtual_data) - - changed = False - if state == 'present': - json_post = { - 'virtual_server': { - 'name': slb_virtual, - 'address': slb_virtual_ip, - 'status': axapi_enabled_disabled(slb_virtual_status), - 'vport_list': slb_virtual_ports, - } - } - - # before creating/updating we need to validate that any - # service groups defined in the ports list exist, since - # the API will still create port definitions for - # them while indicating a failure occurred - checked_service_groups = [] - for port in slb_virtual_ports: - if 'service_group' in port and port['service_group'] not in checked_service_groups: - # skip blank service group entries - if port['service_group'] == '': - continue - result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': port['service_group']})) - if axapi_failure(result): - module.fail_json(msg="the service group %s specified in the ports list does not exist" % port['service_group']) - checked_service_groups.append(port['service_group']) - - if not slb_virtual_exists: - result = axapi_call(module, session_url + '&method=slb.virtual_server.create', json.dumps(json_post)) - if axapi_failure(result): - module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg']) - changed = True - else: - def needs_update(src_ports, dst_ports): - ''' - Checks whether every port definition in src_ports is also - present in dst_ports with identical settings. If any port is - missing or differs, this function returns True, otherwise False.
- ''' - for src_port in src_ports: - found = False - different = False - for dst_port in dst_ports: - if src_port['port'] == dst_port['port']: - found = True - for valid_field in VALID_PORT_FIELDS: - if src_port[valid_field] != dst_port[valid_field]: - different = True - break - if found or different: - break - if not found or different: - return True - # every port from the src exists in the dst, and none of them were different - return False - - defined_ports = slb_virtual_data.get('virtual_server', {}).get('vport_list', []) - - # we check for a needed update both ways, in case ports - # are missing from either the ones specified by the user - # or from those on the device - if needs_update(defined_ports, slb_virtual_ports) or needs_update(slb_virtual_ports, defined_ports): - result = axapi_call(module, session_url + '&method=slb.virtual_server.update', json.dumps(json_post)) - if axapi_failure(result): - module.fail_json(msg="failed to update the virtual server: %s" % result['response']['err']['msg']) - changed = True - - # if we changed things, get the full info regarding - # the virtual server for the return data below - if changed: - result = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual})) - else: - result = slb_virtual_data - elif state == 'absent': - if slb_virtual_exists: - result = axapi_call(module, session_url + '&method=slb.virtual_server.delete', json.dumps({'name': slb_virtual})) - changed = True - else: - result = dict(msg="the virtual server was not present") - - # if the config has changed, save the config unless otherwise requested - if changed and write_config: - write_result = axapi_call(module, session_url + '&method=system.action.write_memory') - if axapi_failure(write_result): - module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg']) - - # log out of the session nicely and exit - axapi_call(module, session_url + '&method=session.close') - module.exit_json(changed=changed, content=result) - -# standard ansible module imports -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.a10 import * - -main() - diff --git a/library/net_infrastructure/bigip_facts b/library/net_infrastructure/bigip_facts deleted file mode 100755 index 99a1e31de68..00000000000 --- a/library/net_infrastructure/bigip_facts +++ /dev/null @@ -1,1670 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Matt Hite -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see .
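# --- illustrative sketch (not part of any module in this diff) ---
# The a10_server and a10_virtual_server modules above call needs_update()
# in both directions because a single pass only detects ports that are
# missing from, or differ in, its second argument. A dict-based symmetric
# check (hypothetical helper, illustrative only) expresses the same idea:
#
#     def ports_differ(src_ports, dst_ports, key='port'):
#         src_index = dict((p[key], p) for p in src_ports)
#         dst_index = dict((p[key], p) for p in dst_ports)
#         return src_index != dst_index  # catches missing ports on either side
# --- end sketch ---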
- -DOCUMENTATION = ''' ---- -module: bigip_facts -short_description: "Collect facts from F5 BIG-IP devices" -description: - - "Collect facts from F5 BIG-IP devices via iControl SOAP API" -version_added: "1.6" -author: Matt Hite -notes: - - "Requires BIG-IP software version >= 11.4" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" - - "Tested with manager and above account privilege level" - -requirements: - - bigsuds -options: - server: - description: - - BIG-IP host - required: true - default: null - choices: [] - aliases: [] - user: - description: - - BIG-IP username - required: true - default: null - choices: [] - aliases: [] - password: - description: - - BIG-IP password - required: true - default: null - choices: [] - aliases: [] - session: - description: - - BIG-IP session support; may be useful to avoid concurrency - issues in certain circumstances. - required: false - default: true - choices: [] - aliases: [] - include: - description: - - Fact category or list of categories to collect - required: true - default: null - choices: ['address_class', 'certificate', 'client_ssl_profile', - 'device_group', 'interface', 'key', 'node', 'pool', 'rule', - 'self_ip', 'software', 'system_info', 'traffic_group', - 'trunk', 'virtual_address', 'virtual_server', 'vlan'] - aliases: [] - filter: - description: - - Shell-style glob matching string used to filter fact keys. Not - applicable for software and system_info fact categories. - required: false - default: null - choices: [] - aliases: [] -''' - -EXAMPLES = ''' - -## playbook task examples: - ---- -# file bigip-test.yml -# ... -- hosts: bigip-test - tasks: - - name: Collect BIG-IP facts - local_action: > - bigip_facts - server=lb.mydomain.com - user=admin - password=mysecret - include=interface,vlan - -''' - -try: - import bigsuds - from suds import MethodNotFound -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -import fnmatch -import traceback -import re - -# =========================================== -# bigip_facts module specific support methods. -# - -class F5(object): - """F5 iControl class. - - F5 BIG-IP iControl API class. - - Attributes: - api: iControl API instance. - """ - - def __init__(self, host, user, password, session=False): - self.api = bigsuds.BIGIP(hostname=host, username=user, password=password) - if session: - self.start_session() - - def start_session(self): - self.api = self.api.with_session_id() - - def get_api(self): - return self.api - - def set_recursive_query_state(self, state): - self.api.System.Session.set_recursive_query_state(state) - - def get_recursive_query_state(self): - return self.api.System.Session.get_recursive_query_state() - - def enable_recursive_query_state(self): - self.set_recursive_query_state('STATE_ENABLED') - - def disable_recursive_query_state(self): - self.set_recursive_query_state('STATE_DISABLED') - - def set_active_folder(self, folder): - self.api.System.Session.set_active_folder(folder=folder) - - def get_active_folder(self): - return self.api.System.Session.get_active_folder() - - -class Interfaces(object): - """Interfaces class. - - F5 BIG-IP interfaces class. - - Attributes: - api: iControl API instance. - interfaces: A list of BIG-IP interface names. 
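    Example (illustrative, not from the original module; assumes a
    reachable BIG-IP and valid credentials):

        f5 = F5('lb.mydomain.com', 'admin', 'mysecret', session=True)
        interfaces = Interfaces(f5.get_api(), regex='^1\.')
        names = interfaces.get_list()
        speeds = interfaces.get_media_speed()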
- """ - - def __init__(self, api, regex=None): - self.api = api - self.interfaces = api.Networking.Interfaces.get_list() - if regex: - re_filter = re.compile(regex) - self.interfaces = filter(re_filter.search, self.interfaces) - - def get_list(self): - return self.interfaces - - def get_active_media(self): - return self.api.Networking.Interfaces.get_active_media(self.interfaces) - - def get_actual_flow_control(self): - return self.api.Networking.Interfaces.get_actual_flow_control(self.interfaces) - - def get_bundle_state(self): - return self.api.Networking.Interfaces.get_bundle_state(self.interfaces) - - def get_description(self): - return self.api.Networking.Interfaces.get_description(self.interfaces) - - def get_dual_media_state(self): - return self.api.Networking.Interfaces.get_dual_media_state(self.interfaces) - - def get_enabled_state(self): - return self.api.Networking.Interfaces.get_enabled_state(self.interfaces) - - def get_if_index(self): - return self.api.Networking.Interfaces.get_if_index(self.interfaces) - - def get_learning_mode(self): - return self.api.Networking.Interfaces.get_learning_mode(self.interfaces) - - def get_lldp_admin_status(self): - return self.api.Networking.Interfaces.get_lldp_admin_status(self.interfaces) - - def get_lldp_tlvmap(self): - return self.api.Networking.Interfaces.get_lldp_tlvmap(self.interfaces) - - def get_mac_address(self): - return self.api.Networking.Interfaces.get_mac_address(self.interfaces) - - def get_media(self): - return self.api.Networking.Interfaces.get_media(self.interfaces) - - def get_media_option(self): - return self.api.Networking.Interfaces.get_media_option(self.interfaces) - - def get_media_option_sfp(self): - return self.api.Networking.Interfaces.get_media_option_sfp(self.interfaces) - - def get_media_sfp(self): - return self.api.Networking.Interfaces.get_media_sfp(self.interfaces) - - def get_media_speed(self): - return self.api.Networking.Interfaces.get_media_speed(self.interfaces) - - def get_media_status(self): - return self.api.Networking.Interfaces.get_media_status(self.interfaces) - - def get_mtu(self): - return self.api.Networking.Interfaces.get_mtu(self.interfaces) - - def get_phy_master_slave_mode(self): - return self.api.Networking.Interfaces.get_phy_master_slave_mode(self.interfaces) - - def get_prefer_sfp_state(self): - return self.api.Networking.Interfaces.get_prefer_sfp_state(self.interfaces) - - def get_flow_control(self): - return self.api.Networking.Interfaces.get_requested_flow_control(self.interfaces) - - def get_sflow_poll_interval(self): - return self.api.Networking.Interfaces.get_sflow_poll_interval(self.interfaces) - - def get_sflow_poll_interval_global(self): - return self.api.Networking.Interfaces.get_sflow_poll_interval_global(self.interfaces) - - def get_sfp_media_state(self): - return self.api.Networking.Interfaces.get_sfp_media_state(self.interfaces) - - def get_stp_active_edge_port_state(self): - return self.api.Networking.Interfaces.get_stp_active_edge_port_state(self.interfaces) - - def get_stp_enabled_state(self): - return self.api.Networking.Interfaces.get_stp_enabled_state(self.interfaces) - - def get_stp_link_type(self): - return self.api.Networking.Interfaces.get_stp_link_type(self.interfaces) - - def get_stp_protocol_detection_reset_state(self): - return self.api.Networking.Interfaces.get_stp_protocol_detection_reset_state(self.interfaces) - - -class SelfIPs(object): - """Self IPs class. - - F5 BIG-IP Self IPs class. - - Attributes: - api: iControl API instance. 
- self_ips: List of self IPs. - """ - - def __init__(self, api, regex=None): - self.api = api - self.self_ips = api.Networking.SelfIPV2.get_list() - if regex: - re_filter = re.compile(regex) - self.self_ips = filter(re_filter.search, self.self_ips) - - def get_list(self): - return self.self_ips - - def get_address(self): - return self.api.Networking.SelfIPV2.get_address(self.self_ips) - - def get_allow_access_list(self): - return self.api.Networking.SelfIPV2.get_allow_access_list(self.self_ips) - - def get_description(self): - return self.api.Networking.SelfIPV2.get_description(self.self_ips) - - def get_enforced_firewall_policy(self): - return self.api.Networking.SelfIPV2.get_enforced_firewall_policy(self.self_ips) - - def get_floating_state(self): - return self.api.Networking.SelfIPV2.get_floating_state(self.self_ips) - - def get_fw_rule(self): - return self.api.Networking.SelfIPV2.get_fw_rule(self.self_ips) - - def get_netmask(self): - return self.api.Networking.SelfIPV2.get_netmask(self.self_ips) - - def get_staged_firewall_policy(self): - return self.api.Networking.SelfIPV2.get_staged_firewall_policy(self.self_ips) - - def get_traffic_group(self): - return self.api.Networking.SelfIPV2.get_traffic_group(self.self_ips) - - def get_vlan(self): - return self.api.Networking.SelfIPV2.get_vlan(self.self_ips) - - def get_is_traffic_group_inherited(self): - return self.api.Networking.SelfIPV2.is_traffic_group_inherited(self.self_ips) - - -class Trunks(object): - """Trunks class. - - F5 BIG-IP trunks class. - - Attributes: - api: iControl API instance. - trunks: List of trunks. - """ - - def __init__(self, api, regex=None): - self.api = api - self.trunks = api.Networking.Trunk.get_list() - if regex: - re_filter = re.compile(regex) - self.trunks = filter(re_filter.search, self.trunks) - - def get_list(self): - return self.trunks - - def get_active_lacp_state(self): - return self.api.Networking.Trunk.get_active_lacp_state(self.trunks) - - def get_configured_member_count(self): - return self.api.Networking.Trunk.get_configured_member_count(self.trunks) - - def get_description(self): - return self.api.Networking.Trunk.get_description(self.trunks) - - def get_distribution_hash_option(self): - return self.api.Networking.Trunk.get_distribution_hash_option(self.trunks) - - def get_interface(self): - return self.api.Networking.Trunk.get_interface(self.trunks) - - def get_lacp_enabled_state(self): - return self.api.Networking.Trunk.get_lacp_enabled_state(self.trunks) - - def get_lacp_timeout_option(self): - return self.api.Networking.Trunk.get_lacp_timeout_option(self.trunks) - - def get_link_selection_policy(self): - return self.api.Networking.Trunk.get_link_selection_policy(self.trunks) - - def get_media_speed(self): - return self.api.Networking.Trunk.get_media_speed(self.trunks) - - def get_media_status(self): - return self.api.Networking.Trunk.get_media_status(self.trunks) - - def get_operational_member_count(self): - return self.api.Networking.Trunk.get_operational_member_count(self.trunks) - - def get_stp_enabled_state(self): - return self.api.Networking.Trunk.get_stp_enabled_state(self.trunks) - - def get_stp_protocol_detection_reset_state(self): - return self.api.Networking.Trunk.get_stp_protocol_detection_reset_state(self.trunks) - - -class Vlans(object): - """Vlans class. - - F5 BIG-IP Vlans class. - - Attributes: - api: iControl API instance. - vlans: List of VLANs. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.vlans = api.Networking.VLAN.get_list() - if regex: - re_filter = re.compile(regex) - self.vlans = filter(re_filter.search, self.vlans) - - def get_list(self): - return self.vlans - - def get_auto_lasthop(self): - return self.api.Networking.VLAN.get_auto_lasthop(self.vlans) - - def get_cmp_hash_algorithm(self): - return self.api.Networking.VLAN.get_cmp_hash_algorithm(self.vlans) - - def get_description(self): - return self.api.Networking.VLAN.get_description(self.vlans) - - def get_dynamic_forwarding(self): - return self.api.Networking.VLAN.get_dynamic_forwarding(self.vlans) - - def get_failsafe_action(self): - return self.api.Networking.VLAN.get_failsafe_action(self.vlans) - - def get_failsafe_state(self): - return self.api.Networking.VLAN.get_failsafe_state(self.vlans) - - def get_failsafe_timeout(self): - return self.api.Networking.VLAN.get_failsafe_timeout(self.vlans) - - def get_if_index(self): - return self.api.Networking.VLAN.get_if_index(self.vlans) - - def get_learning_mode(self): - return self.api.Networking.VLAN.get_learning_mode(self.vlans) - - def get_mac_masquerade_address(self): - return self.api.Networking.VLAN.get_mac_masquerade_address(self.vlans) - - def get_member(self): - return self.api.Networking.VLAN.get_member(self.vlans) - - def get_mtu(self): - return self.api.Networking.VLAN.get_mtu(self.vlans) - - def get_sflow_poll_interval(self): - return self.api.Networking.VLAN.get_sflow_poll_interval(self.vlans) - - def get_sflow_poll_interval_global(self): - return self.api.Networking.VLAN.get_sflow_poll_interval_global(self.vlans) - - def get_sflow_sampling_rate(self): - return self.api.Networking.VLAN.get_sflow_sampling_rate(self.vlans) - - def get_sflow_sampling_rate_global(self): - return self.api.Networking.VLAN.get_sflow_sampling_rate_global(self.vlans) - - def get_source_check_state(self): - return self.api.Networking.VLAN.get_source_check_state(self.vlans) - - def get_true_mac_address(self): - return self.api.Networking.VLAN.get_true_mac_address(self.vlans) - - def get_vlan_id(self): - return self.api.Networking.VLAN.get_vlan_id(self.vlans) - - -class Software(object): - """Software class. - - F5 BIG-IP software class. - - Attributes: - api: iControl API instance. - """ - - def __init__(self, api): - self.api = api - - def get_all_software_status(self): - return self.api.System.SoftwareManagement.get_all_software_status() - - -class VirtualServers(object): - """Virtual servers class. - - F5 BIG-IP virtual servers class. - - Attributes: - api: iControl API instance. - virtual_servers: List of virtual servers. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.virtual_servers = api.LocalLB.VirtualServer.get_list() - if regex: - re_filter = re.compile(regex) - self.virtual_servers = filter(re_filter.search, self.virtual_servers) - - def get_list(self): - return self.virtual_servers - - def get_actual_hardware_acceleration(self): - return self.api.LocalLB.VirtualServer.get_actual_hardware_acceleration(self.virtual_servers) - - def get_authentication_profile(self): - return self.api.LocalLB.VirtualServer.get_authentication_profile(self.virtual_servers) - - def get_auto_lasthop(self): - return self.api.LocalLB.VirtualServer.get_auto_lasthop(self.virtual_servers) - - def get_bw_controller_policy(self): - return self.api.LocalLB.VirtualServer.get_bw_controller_policy(self.virtual_servers) - - def get_clone_pool(self): - return self.api.LocalLB.VirtualServer.get_clone_pool(self.virtual_servers) - - def get_cmp_enable_mode(self): - return self.api.LocalLB.VirtualServer.get_cmp_enable_mode(self.virtual_servers) - - def get_connection_limit(self): - return self.api.LocalLB.VirtualServer.get_connection_limit(self.virtual_servers) - - def get_connection_mirror_state(self): - return self.api.LocalLB.VirtualServer.get_connection_mirror_state(self.virtual_servers) - - def get_default_pool_name(self): - return self.api.LocalLB.VirtualServer.get_default_pool_name(self.virtual_servers) - - def get_description(self): - return self.api.LocalLB.VirtualServer.get_description(self.virtual_servers) - - def get_destination(self): - return self.api.LocalLB.VirtualServer.get_destination_v2(self.virtual_servers) - - def get_enabled_state(self): - return self.api.LocalLB.VirtualServer.get_enabled_state(self.virtual_servers) - - def get_enforced_firewall_policy(self): - return self.api.LocalLB.VirtualServer.get_enforced_firewall_policy(self.virtual_servers) - - def get_fallback_persistence_profile(self): - return self.api.LocalLB.VirtualServer.get_fallback_persistence_profile(self.virtual_servers) - - def get_fw_rule(self): - return self.api.LocalLB.VirtualServer.get_fw_rule(self.virtual_servers) - - def get_gtm_score(self): - return self.api.LocalLB.VirtualServer.get_gtm_score(self.virtual_servers) - - def get_last_hop_pool(self): - return self.api.LocalLB.VirtualServer.get_last_hop_pool(self.virtual_servers) - - def get_nat64_state(self): - return self.api.LocalLB.VirtualServer.get_nat64_state(self.virtual_servers) - - def get_object_status(self): - return self.api.LocalLB.VirtualServer.get_object_status(self.virtual_servers) - - def get_persistence_profile(self): - return self.api.LocalLB.VirtualServer.get_persistence_profile(self.virtual_servers) - - def get_profile(self): - return self.api.LocalLB.VirtualServer.get_profile(self.virtual_servers) - - def get_protocol(self): - return self.api.LocalLB.VirtualServer.get_protocol(self.virtual_servers) - - def get_rate_class(self): - return self.api.LocalLB.VirtualServer.get_rate_class(self.virtual_servers) - - def get_rate_limit(self): - return self.api.LocalLB.VirtualServer.get_rate_limit(self.virtual_servers) - - def get_rate_limit_destination_mask(self): - return self.api.LocalLB.VirtualServer.get_rate_limit_destination_mask(self.virtual_servers) - - def get_rate_limit_mode(self): - return self.api.LocalLB.VirtualServer.get_rate_limit_mode(self.virtual_servers) - - def get_rate_limit_source_mask(self): - return self.api.LocalLB.VirtualServer.get_rate_limit_source_mask(self.virtual_servers) - - def get_related_rule(self): - return 
self.api.LocalLB.VirtualServer.get_related_rule(self.virtual_servers) - - def get_rule(self): - return self.api.LocalLB.VirtualServer.get_rule(self.virtual_servers) - - def get_security_log_profile(self): - return self.api.LocalLB.VirtualServer.get_security_log_profile(self.virtual_servers) - - def get_snat_pool(self): - return self.api.LocalLB.VirtualServer.get_snat_pool(self.virtual_servers) - - def get_snat_type(self): - return self.api.LocalLB.VirtualServer.get_snat_type(self.virtual_servers) - - def get_source_address(self): - return self.api.LocalLB.VirtualServer.get_source_address(self.virtual_servers) - - def get_source_address_translation_lsn_pool(self): - return self.api.LocalLB.VirtualServer.get_source_address_translation_lsn_pool(self.virtual_servers) - - def get_source_address_translation_snat_pool(self): - return self.api.LocalLB.VirtualServer.get_source_address_translation_snat_pool(self.virtual_servers) - - def get_source_address_translation_type(self): - return self.api.LocalLB.VirtualServer.get_source_address_translation_type(self.virtual_servers) - - def get_source_port_behavior(self): - return self.api.LocalLB.VirtualServer.get_source_port_behavior(self.virtual_servers) - - def get_staged_firewall_policy(self): - return self.api.LocalLB.VirtualServer.get_staged_firewall_policy(self.virtual_servers) - - def get_translate_address_state(self): - return self.api.LocalLB.VirtualServer.get_translate_address_state(self.virtual_servers) - - def get_translate_port_state(self): - return self.api.LocalLB.VirtualServer.get_translate_port_state(self.virtual_servers) - - def get_type(self): - return self.api.LocalLB.VirtualServer.get_type(self.virtual_servers) - - def get_vlan(self): - return self.api.LocalLB.VirtualServer.get_vlan(self.virtual_servers) - - def get_wildmask(self): - return self.api.LocalLB.VirtualServer.get_wildmask(self.virtual_servers) - - -class Pools(object): - """Pools class. - - F5 BIG-IP pools class. - - Attributes: - api: iControl API instance. - pool_names: List of pool names. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.pool_names = api.LocalLB.Pool.get_list() - if regex: - re_filter = re.compile(regex) - self.pool_names = filter(re_filter.search, self.pool_names) - - def get_list(self): - return self.pool_names - - def get_action_on_service_down(self): - return self.api.LocalLB.Pool.get_action_on_service_down(self.pool_names) - - def get_active_member_count(self): - return self.api.LocalLB.Pool.get_active_member_count(self.pool_names) - - def get_aggregate_dynamic_ratio(self): - return self.api.LocalLB.Pool.get_aggregate_dynamic_ratio(self.pool_names) - - def get_allow_nat_state(self): - return self.api.LocalLB.Pool.get_allow_nat_state(self.pool_names) - - def get_allow_snat_state(self): - return self.api.LocalLB.Pool.get_allow_snat_state(self.pool_names) - - def get_client_ip_tos(self): - return self.api.LocalLB.Pool.get_client_ip_tos(self.pool_names) - - def get_client_link_qos(self): - return self.api.LocalLB.Pool.get_client_link_qos(self.pool_names) - - def get_description(self): - return self.api.LocalLB.Pool.get_description(self.pool_names) - - def get_gateway_failsafe_device(self): - return self.api.LocalLB.Pool.get_gateway_failsafe_device(self.pool_names) - - def get_ignore_persisted_weight_state(self): - return self.api.LocalLB.Pool.get_ignore_persisted_weight_state(self.pool_names) - - def get_lb_method(self): - return self.api.LocalLB.Pool.get_lb_method(self.pool_names) - - def get_member(self): - return self.api.LocalLB.Pool.get_member_v2(self.pool_names) - - def get_minimum_active_member(self): - return self.api.LocalLB.Pool.get_minimum_active_member(self.pool_names) - - def get_minimum_up_member(self): - return self.api.LocalLB.Pool.get_minimum_up_member(self.pool_names) - - def get_minimum_up_member_action(self): - return self.api.LocalLB.Pool.get_minimum_up_member_action(self.pool_names) - - def get_minimum_up_member_enabled_state(self): - return self.api.LocalLB.Pool.get_minimum_up_member_enabled_state(self.pool_names) - - def get_monitor_association(self): - return self.api.LocalLB.Pool.get_monitor_association(self.pool_names) - - def get_monitor_instance(self): - return self.api.LocalLB.Pool.get_monitor_instance(self.pool_names) - - def get_object_status(self): - return self.api.LocalLB.Pool.get_object_status(self.pool_names) - - def get_profile(self): - return self.api.LocalLB.Pool.get_profile(self.pool_names) - - def get_queue_depth_limit(self): - return self.api.LocalLB.Pool.get_queue_depth_limit(self.pool_names) - - def get_queue_on_connection_limit_state(self): - return self.api.LocalLB.Pool.get_queue_on_connection_limit_state(self.pool_names) - - def get_queue_time_limit(self): - return self.api.LocalLB.Pool.get_queue_time_limit(self.pool_names) - - def get_reselect_tries(self): - return self.api.LocalLB.Pool.get_reselect_tries(self.pool_names) - - def get_server_ip_tos(self): - return self.api.LocalLB.Pool.get_server_ip_tos(self.pool_names) - - def get_server_link_qos(self): - return self.api.LocalLB.Pool.get_server_link_qos(self.pool_names) - - def get_simple_timeout(self): - return self.api.LocalLB.Pool.get_simple_timeout(self.pool_names) - - def get_slow_ramp_time(self): - return self.api.LocalLB.Pool.get_slow_ramp_time(self.pool_names) - - -class Devices(object): - """Devices class. - - F5 BIG-IP devices class. - - Attributes: - api: iControl API instance. - devices: List of devices. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.devices = api.Management.Device.get_list() - if regex: - re_filter = re.compile(regex) - self.devices = filter(re_filter.search, self.devices) - - def get_list(self): - return self.devices - - def get_active_modules(self): - return self.api.Management.Device.get_active_modules(self.devices) - - def get_base_mac_address(self): - return self.api.Management.Device.get_base_mac_address(self.devices) - - def get_blade_addresses(self): - return self.api.Management.Device.get_blade_addresses(self.devices) - - def get_build(self): - return self.api.Management.Device.get_build(self.devices) - - def get_chassis_id(self): - return self.api.Management.Device.get_chassis_id(self.devices) - - def get_chassis_type(self): - return self.api.Management.Device.get_chassis_type(self.devices) - - def get_comment(self): - return self.api.Management.Device.get_comment(self.devices) - - def get_configsync_address(self): - return self.api.Management.Device.get_configsync_address(self.devices) - - def get_contact(self): - return self.api.Management.Device.get_contact(self.devices) - - def get_description(self): - return self.api.Management.Device.get_description(self.devices) - - def get_edition(self): - return self.api.Management.Device.get_edition(self.devices) - - def get_failover_state(self): - return self.api.Management.Device.get_failover_state(self.devices) - - def get_local_device(self): - return self.api.Management.Device.get_local_device() - - def get_hostname(self): - return self.api.Management.Device.get_hostname(self.devices) - - def get_inactive_modules(self): - return self.api.Management.Device.get_inactive_modules(self.devices) - - def get_location(self): - return self.api.Management.Device.get_location(self.devices) - - def get_management_address(self): - return self.api.Management.Device.get_management_address(self.devices) - - def get_marketing_name(self): - return self.api.Management.Device.get_marketing_name(self.devices) - - def get_multicast_address(self): - return self.api.Management.Device.get_multicast_address(self.devices) - - def get_optional_modules(self): - return self.api.Management.Device.get_optional_modules(self.devices) - - def get_platform_id(self): - return self.api.Management.Device.get_platform_id(self.devices) - - def get_primary_mirror_address(self): - return self.api.Management.Device.get_primary_mirror_address(self.devices) - - def get_product(self): - return self.api.Management.Device.get_product(self.devices) - - def get_secondary_mirror_address(self): - return self.api.Management.Device.get_secondary_mirror_address(self.devices) - - def get_software_version(self): - return self.api.Management.Device.get_software_version(self.devices) - - def get_timelimited_modules(self): - return self.api.Management.Device.get_timelimited_modules(self.devices) - - def get_timezone(self): - return self.api.Management.Device.get_timezone(self.devices) - - def get_unicast_addresses(self): - return self.api.Management.Device.get_unicast_addresses(self.devices) - - -class DeviceGroups(object): - """Device groups class. - - F5 BIG-IP device groups class. - - Attributes: - api: iControl API instance. - device_groups: List of device groups. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.device_groups = api.Management.DeviceGroup.get_list() - if regex: - re_filter = re.compile(regex) - self.device_groups = filter(re_filter.search, self.device_groups) - - def get_list(self): - return self.device_groups - - def get_all_preferred_active(self): - return self.api.Management.DeviceGroup.get_all_preferred_active(self.device_groups) - - def get_autosync_enabled_state(self): - return self.api.Management.DeviceGroup.get_autosync_enabled_state(self.device_groups) - - def get_description(self): - return self.api.Management.DeviceGroup.get_description(self.device_groups) - - def get_device(self): - return self.api.Management.DeviceGroup.get_device(self.device_groups) - - def get_full_load_on_sync_state(self): - return self.api.Management.DeviceGroup.get_full_load_on_sync_state(self.device_groups) - - def get_incremental_config_sync_size_maximum(self): - return self.api.Management.DeviceGroup.get_incremental_config_sync_size_maximum(self.device_groups) - - def get_network_failover_enabled_state(self): - return self.api.Management.DeviceGroup.get_network_failover_enabled_state(self.device_groups) - - def get_sync_status(self): - return self.api.Management.DeviceGroup.get_sync_status(self.device_groups) - - def get_type(self): - return self.api.Management.DeviceGroup.get_type(self.device_groups) - - -class TrafficGroups(object): - """Traffic groups class. - - F5 BIG-IP traffic groups class. - - Attributes: - api: iControl API instance. - traffic_groups: List of traffic groups. - """ - - def __init__(self, api, regex=None): - self.api = api - self.traffic_groups = api.Management.TrafficGroup.get_list() - if regex: - re_filter = re.compile(regex) - self.traffic_groups = filter(re_filter.search, self.traffic_groups) - - def get_list(self): - return self.traffic_groups - - def get_auto_failback_enabled_state(self): - return self.api.Management.TrafficGroup.get_auto_failback_enabled_state(self.traffic_groups) - - def get_auto_failback_time(self): - return self.api.Management.TrafficGroup.get_auto_failback_time(self.traffic_groups) - - def get_default_device(self): - return self.api.Management.TrafficGroup.get_default_device(self.traffic_groups) - - def get_description(self): - return self.api.Management.TrafficGroup.get_description(self.traffic_groups) - - def get_ha_load_factor(self): - return self.api.Management.TrafficGroup.get_ha_load_factor(self.traffic_groups) - - def get_ha_order(self): - return self.api.Management.TrafficGroup.get_ha_order(self.traffic_groups) - - def get_is_floating(self): - return self.api.Management.TrafficGroup.get_is_floating(self.traffic_groups) - - def get_mac_masquerade_address(self): - return self.api.Management.TrafficGroup.get_mac_masquerade_address(self.traffic_groups) - - def get_unit_id(self): - return self.api.Management.TrafficGroup.get_unit_id(self.traffic_groups) - - -class Rules(object): - """Rules class. - - F5 BIG-IP iRules class. - - Attributes: - api: iControl API instance. - rules: List of iRules. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.rules = api.LocalLB.Rule.get_list() - if regex: - re_filter = re.compile(regex) - self.traffic_groups = filter(re_filter.search, self.rules) - - def get_list(self): - return self.rules - - def get_description(self): - return self.api.LocalLB.Rule.get_description(rule_names=self.rules) - - def get_ignore_vertification(self): - return self.api.LocalLB.Rule.get_ignore_vertification(rule_names=self.rules) - - def get_verification_status(self): - return self.api.LocalLB.Rule.get_verification_status_v2(rule_names=self.rules) - - def get_definition(self): - return [x['rule_definition'] for x in self.api.LocalLB.Rule.query_rule(rule_names=self.rules)] - -class Nodes(object): - """Nodes class. - - F5 BIG-IP nodes class. - - Attributes: - api: iControl API instance. - nodes: List of nodes. - """ - - def __init__(self, api, regex=None): - self.api = api - self.nodes = api.LocalLB.NodeAddressV2.get_list() - if regex: - re_filter = re.compile(regex) - self.nodes = filter(re_filter.search, self.nodes) - - def get_list(self): - return self.nodes - - def get_address(self): - return self.api.LocalLB.NodeAddressV2.get_address(nodes=self.nodes) - - def get_connection_limit(self): - return self.api.LocalLB.NodeAddressV2.get_connection_limit(nodes=self.nodes) - - def get_description(self): - return self.api.LocalLB.NodeAddressV2.get_description(nodes=self.nodes) - - def get_dynamic_ratio(self): - return self.api.LocalLB.NodeAddressV2.get_dynamic_ratio_v2(nodes=self.nodes) - - def get_monitor_instance(self): - return self.api.LocalLB.NodeAddressV2.get_monitor_instance(nodes=self.nodes) - - def get_monitor_rule(self): - return self.api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=self.nodes) - - def get_monitor_status(self): - return self.api.LocalLB.NodeAddressV2.get_monitor_status(nodes=self.nodes) - - def get_object_status(self): - return self.api.LocalLB.NodeAddressV2.get_object_status(nodes=self.nodes) - - def get_rate_limit(self): - return self.api.LocalLB.NodeAddressV2.get_rate_limit(nodes=self.nodes) - - def get_ratio(self): - return self.api.LocalLB.NodeAddressV2.get_ratio(nodes=self.nodes) - - def get_session_status(self): - return self.api.LocalLB.NodeAddressV2.get_session_status(nodes=self.nodes) - - -class VirtualAddresses(object): - """Virtual addresses class. - - F5 BIG-IP virtual addresses class. - - Attributes: - api: iControl API instance. - virtual_addresses: List of virtual addresses. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.virtual_addresses = api.LocalLB.VirtualAddressV2.get_list() - if regex: - re_filter = re.compile(regex) - self.virtual_addresses = filter(re_filter.search, self.virtual_addresses) - - def get_list(self): - return self.virtual_addresses - - def get_address(self): - return self.api.LocalLB.VirtualAddressV2.get_address(self.virtual_addresses) - - def get_arp_state(self): - return self.api.LocalLB.VirtualAddressV2.get_arp_state(self.virtual_addresses) - - def get_auto_delete_state(self): - return self.api.LocalLB.VirtualAddressV2.get_auto_delete_state(self.virtual_addresses) - - def get_connection_limit(self): - return self.api.LocalLB.VirtualAddressV2.get_connection_limit(self.virtual_addresses) - - def get_description(self): - return self.api.LocalLB.VirtualAddressV2.get_description(self.virtual_addresses) - - def get_enabled_state(self): - return self.api.LocalLB.VirtualAddressV2.get_enabled_state(self.virtual_addresses) - - def get_icmp_echo_state(self): - return self.api.LocalLB.VirtualAddressV2.get_icmp_echo_state(self.virtual_addresses) - - def get_is_floating_state(self): - return self.api.LocalLB.VirtualAddressV2.get_is_floating_state(self.virtual_addresses) - - def get_netmask(self): - return self.api.LocalLB.VirtualAddressV2.get_netmask(self.virtual_addresses) - - def get_object_status(self): - return self.api.LocalLB.VirtualAddressV2.get_object_status(self.virtual_addresses) - - def get_route_advertisement_state(self): - return self.api.LocalLB.VirtualAddressV2.get_route_advertisement_state(self.virtual_addresses) - - def get_traffic_group(self): - return self.api.LocalLB.VirtualAddressV2.get_traffic_group(self.virtual_addresses) - - -class AddressClasses(object): - """Address group/class class. - - F5 BIG-IP address group/class class. - - Attributes: - api: iControl API instance. - address_classes: List of address classes. - """ - - def __init__(self, api, regex=None): - self.api = api - self.address_classes = api.LocalLB.Class.get_address_class_list() - if regex: - re_filter = re.compile(regex) - self.address_classes = filter(re_filter.search, self.address_classes) - - def get_list(self): - return self.address_classes - - def get_address_class(self): - key = self.api.LocalLB.Class.get_address_class(self.address_classes) - value = self.api.LocalLB.Class.get_address_class_member_data_value(key) - result = map(zip, [x['members'] for x in key], value) - return result - - def get_description(self): - return self.api.LocalLB.Class.get_description(self.address_classes) - - -class Certificates(object): - """Certificates class. - - F5 BIG-IP certificates class. - - Attributes: - api: iControl API instance. - certificates: List of certificate identifiers. - certificate_list: List of certificate information structures. - """ - - def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"): - self.api = api - self.certificate_list = api.Management.KeyCertificate.get_certificate_list(mode=mode) - self.certificates = [x['certificate']['cert_info']['id'] for x in self.certificate_list] - if regex: - re_filter = re.compile(regex) - self.certificates = filter(re_filter.search, self.certificates) - self.certificate_list = [x for x in self.certificate_list if x['certificate']['cert_info']['id'] in self.certificates] - - def get_list(self): - return self.certificates - - def get_certificate_list(self): - return self.certificate_list - - -class Keys(object): - """Keys class. - - F5 BIG-IP keys class. 
- - Attributes: - api: iControl API instance. - keys: List of key identifiers. - key_list: List of key information structures. - """ - - def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"): - self.api = api - self.key_list = api.Management.KeyCertificate.get_key_list(mode=mode) - self.keys = [x['key_info']['id'] for x in self.key_list] - if regex: - re_filter = re.compile(regex) - self.keys = filter(re_filter.search, self.keys) - self.key_list = [x for x in self.key_list if x['key_info']['id'] in self.keys] - - def get_list(self): - return self.keys - - def get_key_list(self): - return self.key_list - - -class ProfileClientSSL(object): - """Client SSL profiles class. - - F5 BIG-IP client SSL profiles class. - - Attributes: - api: iControl API instance. - profiles: List of client SSL profiles. - """ - - def __init__(self, api, regex=None): - self.api = api - self.profiles = api.LocalLB.ProfileClientSSL.get_list() - if regex: - re_filter = re.compile(regex) - self.profiles = filter(re_filter.search, self.profiles) - - def get_list(self): - return self.profiles - - def get_alert_timeout(self): - return self.api.LocalLB.ProfileClientSSL.get_alert_timeout(self.profiles) - - def get_allow_nonssl_state(self): - return self.api.LocalLB.ProfileClientSSL.get_allow_nonssl_state(self.profiles) - - def get_authenticate_depth(self): - return self.api.LocalLB.ProfileClientSSL.get_authenticate_depth(self.profiles) - - def get_authenticate_once_state(self): - return self.api.LocalLB.ProfileClientSSL.get_authenticate_once_state(self.profiles) - - def get_ca_file(self): - return self.api.LocalLB.ProfileClientSSL.get_ca_file_v2(self.profiles) - - def get_cache_size(self): - return self.api.LocalLB.ProfileClientSSL.get_cache_size(self.profiles) - - def get_cache_timeout(self): - return self.api.LocalLB.ProfileClientSSL.get_cache_timeout(self.profiles) - - def get_certificate_file(self): - return self.api.LocalLB.ProfileClientSSL.get_certificate_file_v2(self.profiles) - - def get_chain_file(self): - return self.api.LocalLB.ProfileClientSSL.get_chain_file_v2(self.profiles) - - def get_cipher_list(self): - return self.api.LocalLB.ProfileClientSSL.get_cipher_list(self.profiles) - - def get_client_certificate_ca_file(self): - return self.api.LocalLB.ProfileClientSSL.get_client_certificate_ca_file_v2(self.profiles) - - def get_crl_file(self): - return self.api.LocalLB.ProfileClientSSL.get_crl_file_v2(self.profiles) - - def get_default_profile(self): - return self.api.LocalLB.ProfileClientSSL.get_default_profile(self.profiles) - - def get_description(self): - return self.api.LocalLB.ProfileClientSSL.get_description(self.profiles) - - def get_forward_proxy_ca_certificate_file(self): - return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_certificate_file(self.profiles) - - def get_forward_proxy_ca_key_file(self): - return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_key_file(self.profiles) - - def get_forward_proxy_ca_passphrase(self): - return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_passphrase(self.profiles) - - def get_forward_proxy_certificate_extension_include(self): - return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_extension_include(self.profiles) - - def get_forward_proxy_certificate_lifespan(self): - return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_lifespan(self.profiles) - - def get_forward_proxy_enabled_state(self): - return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_enabled_state(self.profiles) - - def 
get_forward_proxy_lookup_by_ipaddr_port_state(self): - return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_lookup_by_ipaddr_port_state(self.profiles) - - def get_handshake_timeout(self): - return self.api.LocalLB.ProfileClientSSL.get_handshake_timeout(self.profiles) - - def get_key_file(self): - return self.api.LocalLB.ProfileClientSSL.get_key_file_v2(self.profiles) - - def get_modssl_emulation_state(self): - return self.api.LocalLB.ProfileClientSSL.get_modssl_emulation_state(self.profiles) - - def get_passphrase(self): - return self.api.LocalLB.ProfileClientSSL.get_passphrase(self.profiles) - - def get_peer_certification_mode(self): - return self.api.LocalLB.ProfileClientSSL.get_peer_certification_mode(self.profiles) - - def get_profile_mode(self): - return self.api.LocalLB.ProfileClientSSL.get_profile_mode(self.profiles) - - def get_renegotiation_maximum_record_delay(self): - return self.api.LocalLB.ProfileClientSSL.get_renegotiation_maximum_record_delay(self.profiles) - - def get_renegotiation_period(self): - return self.api.LocalLB.ProfileClientSSL.get_renegotiation_period(self.profiles) - - def get_renegotiation_state(self): - return self.api.LocalLB.ProfileClientSSL.get_renegotiation_state(self.profiles) - - def get_renegotiation_throughput(self): - return self.api.LocalLB.ProfileClientSSL.get_renegotiation_throughput(self.profiles) - - def get_retain_certificate_state(self): - return self.api.LocalLB.ProfileClientSSL.get_retain_certificate_state(self.profiles) - - def get_secure_renegotiation_mode(self): - return self.api.LocalLB.ProfileClientSSL.get_secure_renegotiation_mode(self.profiles) - - def get_server_name(self): - return self.api.LocalLB.ProfileClientSSL.get_server_name(self.profiles) - - def get_session_ticket_state(self): - return self.api.LocalLB.ProfileClientSSL.get_session_ticket_state(self.profiles) - - def get_sni_default_state(self): - return self.api.LocalLB.ProfileClientSSL.get_sni_default_state(self.profiles) - - def get_sni_require_state(self): - return self.api.LocalLB.ProfileClientSSL.get_sni_require_state(self.profiles) - - def get_ssl_option(self): - return self.api.LocalLB.ProfileClientSSL.get_ssl_option(self.profiles) - - def get_strict_resume_state(self): - return self.api.LocalLB.ProfileClientSSL.get_strict_resume_state(self.profiles) - - def get_unclean_shutdown_state(self): - return self.api.LocalLB.ProfileClientSSL.get_unclean_shutdown_state(self.profiles) - - def get_is_base_profile(self): - return self.api.LocalLB.ProfileClientSSL.is_base_profile(self.profiles) - - def get_is_system_profile(self): - return self.api.LocalLB.ProfileClientSSL.is_system_profile(self.profiles) - - -class SystemInfo(object): - """System information class. - - F5 BIG-IP system information class. - - Attributes: - api: iControl API instance. 
- """ - - def __init__(self, api): - self.api = api - - def get_base_mac_address(self): - return self.api.System.SystemInfo.get_base_mac_address() - - def get_blade_temperature(self): - return self.api.System.SystemInfo.get_blade_temperature() - - def get_chassis_slot_information(self): - return self.api.System.SystemInfo.get_chassis_slot_information() - - def get_globally_unique_identifier(self): - return self.api.System.SystemInfo.get_globally_unique_identifier() - - def get_group_id(self): - return self.api.System.SystemInfo.get_group_id() - - def get_hardware_information(self): - return self.api.System.SystemInfo.get_hardware_information() - - def get_marketing_name(self): - return self.api.System.SystemInfo.get_marketing_name() - - def get_product_information(self): - return self.api.System.SystemInfo.get_product_information() - - def get_pva_version(self): - return self.api.System.SystemInfo.get_pva_version() - - def get_system_id(self): - return self.api.System.SystemInfo.get_system_id() - - def get_system_information(self): - return self.api.System.SystemInfo.get_system_information() - - def get_time(self): - return self.api.System.SystemInfo.get_time() - - def get_time_zone(self): - return self.api.System.SystemInfo.get_time_zone() - - def get_uptime(self): - return self.api.System.SystemInfo.get_uptime() - - -def generate_dict(api_obj, fields): - result_dict = {} - lists = [] - supported_fields = [] - if api_obj.get_list(): - for field in fields: - try: - api_response = getattr(api_obj, "get_" + field)() - except MethodNotFound: - pass - else: - lists.append(api_response) - supported_fields.append(field) - for i, j in enumerate(api_obj.get_list()): - temp = {} - temp.update([(item[0], item[1][i]) for item in zip(supported_fields, lists)]) - result_dict[j] = temp - return result_dict - -def generate_simple_dict(api_obj, fields): - result_dict = {} - for field in fields: - try: - api_response = getattr(api_obj, "get_" + field)() - except MethodNotFound: - pass - else: - result_dict[field] = api_response - return result_dict - -def generate_interface_dict(f5, regex): - interfaces = Interfaces(f5.get_api(), regex) - fields = ['active_media', 'actual_flow_control', 'bundle_state', - 'description', 'dual_media_state', 'enabled_state', 'if_index', - 'learning_mode', 'lldp_admin_status', 'lldp_tlvmap', - 'mac_address', 'media', 'media_option', 'media_option_sfp', - 'media_sfp', 'media_speed', 'media_status', 'mtu', - 'phy_master_slave_mode', 'prefer_sfp_state', 'flow_control', - 'sflow_poll_interval', 'sflow_poll_interval_global', - 'sfp_media_state', 'stp_active_edge_port_state', - 'stp_enabled_state', 'stp_link_type', - 'stp_protocol_detection_reset_state'] - return generate_dict(interfaces, fields) - -def generate_self_ip_dict(f5, regex): - self_ips = SelfIPs(f5.get_api(), regex) - fields = ['address', 'allow_access_list', 'description', - 'enforced_firewall_policy', 'floating_state', 'fw_rule', - 'netmask', 'staged_firewall_policy', 'traffic_group', - 'vlan', 'is_traffic_group_inherited'] - return generate_dict(self_ips, fields) - -def generate_trunk_dict(f5, regex): - trunks = Trunks(f5.get_api(), regex) - fields = ['active_lacp_state', 'configured_member_count', 'description', - 'distribution_hash_option', 'interface', 'lacp_enabled_state', - 'lacp_timeout_option', 'link_selection_policy', 'media_speed', - 'media_status', 'operational_member_count', 'stp_enabled_state', - 'stp_protocol_detection_reset_state'] - return generate_dict(trunks, fields) - -def generate_vlan_dict(f5, 
regex): - vlans = Vlans(f5.get_api(), regex) - fields = ['auto_lasthop', 'cmp_hash_algorithm', 'description', - 'dynamic_forwarding', 'failsafe_action', 'failsafe_state', - 'failsafe_timeout', 'if_index', 'learning_mode', - 'mac_masquerade_address', 'member', 'mtu', - 'sflow_poll_interval', 'sflow_poll_interval_global', - 'sflow_sampling_rate', 'sflow_sampling_rate_global', - 'source_check_state', 'true_mac_address', 'vlan_id'] - return generate_dict(vlans, fields) - -def generate_vs_dict(f5, regex): - virtual_servers = VirtualServers(f5.get_api(), regex) - fields = ['actual_hardware_acceleration', 'authentication_profile', - 'auto_lasthop', 'bw_controller_policy', 'clone_pool', - 'cmp_enable_mode', 'connection_limit', 'connection_mirror_state', - 'default_pool_name', 'description', 'destination', - 'enabled_state', 'enforced_firewall_policy', - 'fallback_persistence_profile', 'fw_rule', 'gtm_score', - 'last_hop_pool', 'nat64_state', 'object_status', - 'persistence_profile', 'profile', 'protocol', - 'rate_class', 'rate_limit', 'rate_limit_destination_mask', - 'rate_limit_mode', 'rate_limit_source_mask', 'related_rule', - 'rule', 'security_log_profile', 'snat_pool', 'snat_type', - 'source_address', 'source_address_translation_lsn_pool', - 'source_address_translation_snat_pool', - 'source_address_translation_type', 'source_port_behavior', - 'staged_firewall_policy', 'translate_address_state', - 'translate_port_state', 'type', 'vlan', 'wildmask'] - return generate_dict(virtual_servers, fields) - -def generate_pool_dict(f5, regex): - pools = Pools(f5.get_api(), regex) - fields = ['action_on_service_down', 'active_member_count', - 'aggregate_dynamic_ratio', 'allow_nat_state', - 'allow_snat_state', 'client_ip_tos', 'client_link_qos', - 'description', 'gateway_failsafe_device', - 'ignore_persisted_weight_state', 'lb_method', 'member', - 'minimum_active_member', 'minimum_up_member', - 'minimum_up_member_action', 'minimum_up_member_enabled_state', - 'monitor_association', 'monitor_instance', 'object_status', - 'profile', 'queue_depth_limit', - 'queue_on_connection_limit_state', 'queue_time_limit', - 'reselect_tries', 'server_ip_tos', 'server_link_qos', - 'simple_timeout', 'slow_ramp_time'] - return generate_dict(pools, fields) - -def generate_device_dict(f5, regex): - devices = Devices(f5.get_api(), regex) - fields = ['active_modules', 'base_mac_address', 'blade_addresses', - 'build', 'chassis_id', 'chassis_type', 'comment', - 'configsync_address', 'contact', 'description', 'edition', - 'failover_state', 'hostname', 'inactive_modules', 'location', - 'management_address', 'marketing_name', 'multicast_address', - 'optional_modules', 'platform_id', 'primary_mirror_address', - 'product', 'secondary_mirror_address', 'software_version', - 'timelimited_modules', 'timezone', 'unicast_addresses'] - return generate_dict(devices, fields) - -def generate_device_group_dict(f5, regex): - device_groups = DeviceGroups(f5.get_api(), regex) - fields = ['all_preferred_active', 'autosync_enabled_state','description', - 'device', 'full_load_on_sync_state', - 'incremental_config_sync_size_maximum', - 'network_failover_enabled_state', 'sync_status', 'type'] - return generate_dict(device_groups, fields) - -def generate_traffic_group_dict(f5, regex): - traffic_groups = TrafficGroups(f5.get_api(), regex) - fields = ['auto_failback_enabled_state', 'auto_failback_time', - 'default_device', 'description', 'ha_load_factor', - 'ha_order', 'is_floating', 'mac_masquerade_address', - 'unit_id'] - return 
generate_dict(traffic_groups, fields) - -def generate_rule_dict(f5, regex): - rules = Rules(f5.get_api(), regex) - fields = ['definition', 'description', 'ignore_vertification', - 'verification_status'] - return generate_dict(rules, fields) - -def generate_node_dict(f5, regex): - nodes = Nodes(f5.get_api(), regex) - fields = ['address', 'connection_limit', 'description', 'dynamic_ratio', - 'monitor_instance', 'monitor_rule', 'monitor_status', - 'object_status', 'rate_limit', 'ratio', 'session_status'] - return generate_dict(nodes, fields) - -def generate_virtual_address_dict(f5, regex): - virtual_addresses = VirtualAddresses(f5.get_api(), regex) - fields = ['address', 'arp_state', 'auto_delete_state', 'connection_limit', - 'description', 'enabled_state', 'icmp_echo_state', - 'is_floating_state', 'netmask', 'object_status', - 'route_advertisement_state', 'traffic_group'] - return generate_dict(virtual_addresses, fields) - -def generate_address_class_dict(f5, regex): - address_classes = AddressClasses(f5.get_api(), regex) - fields = ['address_class', 'description'] - return generate_dict(address_classes, fields) - -def generate_certificate_dict(f5, regex): - certificates = Certificates(f5.get_api(), regex) - return dict(zip(certificates.get_list(), certificates.get_certificate_list())) - -def generate_key_dict(f5, regex): - keys = Keys(f5.get_api(), regex) - return dict(zip(keys.get_list(), keys.get_key_list())) - -def generate_client_ssl_profile_dict(f5, regex): - profiles = ProfileClientSSL(f5.get_api(), regex) - fields = ['alert_timeout', 'allow_nonssl_state', 'authenticate_depth', - 'authenticate_once_state', 'ca_file', 'cache_size', - 'cache_timeout', 'certificate_file', 'chain_file', - 'cipher_list', 'client_certificate_ca_file', 'crl_file', - 'default_profile', 'description', - 'forward_proxy_ca_certificate_file', 'forward_proxy_ca_key_file', - 'forward_proxy_ca_passphrase', - 'forward_proxy_certificate_extension_include', - 'forward_proxy_certificate_lifespan', - 'forward_proxy_enabled_state', - 'forward_proxy_lookup_by_ipaddr_port_state', 'handshake_timeout', - 'key_file', 'modssl_emulation_state', 'passphrase', - 'peer_certification_mode', 'profile_mode', - 'renegotiation_maximum_record_delay', 'renegotiation_period', - 'renegotiation_state', 'renegotiation_throughput', - 'retain_certificate_state', 'secure_renegotiation_mode', - 'server_name', 'session_ticket_state', 'sni_default_state', - 'sni_require_state', 'ssl_option', 'strict_resume_state', - 'unclean_shutdown_state', 'is_base_profile', 'is_system_profile'] - return generate_dict(profiles, fields) - -def generate_system_info_dict(f5): - system_info = SystemInfo(f5.get_api()) - fields = ['base_mac_address', - 'blade_temperature', 'chassis_slot_information', - 'globally_unique_identifier', 'group_id', - 'hardware_information', - 'marketing_name', - 'product_information', 'pva_version', 'system_id', - 'system_information', 'time', - 'time_zone', 'uptime'] - return generate_simple_dict(system_info, fields) - -def generate_software_list(f5): - software = Software(f5.get_api()) - software_list = software.get_all_software_status() - return software_list - - -def main(): - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - session = dict(type='bool', default=False), - include = dict(type='list', required=True), - filter = dict(type='str', required=False), - ) - ) - - if not bigsuds_found: - 
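- # MethodNotFound, caught by generate_dict() when a getter is unavailable on - # this BIG-IP version, comes from suds; the SOAP client itself is bigsuds, - # so both libraries must be importable for the module to work.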
module.fail_json(msg="the python suds and bigsuds modules is required") - - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - session = module.params['session'] - fact_filter = module.params['filter'] - if fact_filter: - regex = fnmatch.translate(fact_filter) - else: - regex = None - include = map(lambda x: x.lower(), module.params['include']) - valid_includes = ('address_class', 'certificate', 'client_ssl_profile', - 'device_group', 'interface', 'key', 'node', 'pool', - 'rule', 'self_ip', 'software', 'system_info', - 'traffic_group', 'trunk', 'virtual_address', - 'virtual_server', 'vlan') - include_test = map(lambda x: x in valid_includes, include) - if not all(include_test): - module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include))) - - try: - facts = {} - - if len(include) > 0: - f5 = F5(server, user, password, session) - saved_active_folder = f5.get_active_folder() - saved_recursive_query_state = f5.get_recursive_query_state() - if saved_active_folder != "/": - f5.set_active_folder("/") - if saved_recursive_query_state != "STATE_ENABLED": - f5.enable_recursive_query_state() - - if 'interface' in include: - facts['interface'] = generate_interface_dict(f5, regex) - if 'self_ip' in include: - facts['self_ip'] = generate_self_ip_dict(f5, regex) - if 'trunk' in include: - facts['trunk'] = generate_trunk_dict(f5, regex) - if 'vlan' in include: - facts['vlan'] = generate_vlan_dict(f5, regex) - if 'virtual_server' in include: - facts['virtual_server'] = generate_vs_dict(f5, regex) - if 'pool' in include: - facts['pool'] = generate_pool_dict(f5, regex) - if 'device' in include: - facts['device'] = generate_device_dict(f5, regex) - if 'device_group' in include: - facts['device_group'] = generate_device_group_dict(f5, regex) - if 'traffic_group' in include: - facts['traffic_group'] = generate_traffic_group_dict(f5, regex) - if 'rule' in include: - facts['rule'] = generate_rule_dict(f5, regex) - if 'node' in include: - facts['node'] = generate_node_dict(f5, regex) - if 'virtual_address' in include: - facts['virtual_address'] = generate_virtual_address_dict(f5, regex) - if 'address_class' in include: - facts['address_class'] = generate_address_class_dict(f5, regex) - if 'software' in include: - facts['software'] = generate_software_list(f5) - if 'certificate' in include: - facts['certificate'] = generate_certificate_dict(f5, regex) - if 'key' in include: - facts['key'] = generate_key_dict(f5, regex) - if 'client_ssl_profile' in include: - facts['client_ssl_profile'] = generate_client_ssl_profile_dict(f5, regex) - if 'system_info' in include: - facts['system_info'] = generate_system_info_dict(f5) - - # restore saved state - if saved_active_folder and saved_active_folder != "/": - f5.set_active_folder(saved_active_folder) - if saved_recursive_query_state and \ - saved_recursive_query_state != "STATE_ENABLED": - f5.set_recursive_query_state(saved_recursive_query_state) - - result = {'ansible_facts': facts} - - except Exception, e: - module.fail_json(msg="received exception: %s\ntraceback: %s" % (e, traceback.format_exc())) - - module.exit_json(**result) - -# include magic from lib/ansible/module_common.py -#<> -main() - diff --git a/library/net_infrastructure/bigip_monitor_http b/library/net_infrastructure/bigip_monitor_http deleted file mode 100644 index 62823f86579..00000000000 --- a/library/net_infrastructure/bigip_monitor_http +++ /dev/null @@ -1,464 +0,0 @@ -#!/usr/bin/python 
-# -*- coding: utf-8 -*- - -# (c) 2013, serge van Ginderachter -# based on Matt Hite's bigip_pool module -# (c) 2013, Matt Hite -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' --- -module: bigip_monitor_http -short_description: "Manages F5 BIG-IP LTM http monitors" -description: - - "Manages F5 BIG-IP LTM monitors via iControl SOAP API" -version_added: "1.4" -author: Serge van Ginderachter -notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" - - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx" -requirements: - - bigsuds -options: - server: - description: - - BIG-IP host - required: true - default: null - user: - description: - - BIG-IP username - required: true - default: null - password: - description: - - BIG-IP password - required: true - default: null - state: - description: - - Monitor state - required: false - default: 'present' - choices: ['present', 'absent'] - name: - description: - - Monitor name - required: true - default: null - aliases: ['monitor'] - partition: - description: - - Partition for the monitor - required: false - default: 'Common' - parent: - description: - - The parent template of this monitor template - required: false - default: 'http' - parent_partition: - description: - - Partition for the parent monitor - required: false - default: 'Common' - send: - description: - - The send string for the monitor call - required: false - default: none - receive: - description: - - The receive string for the monitor call - required: false - default: none - receive_disable: - description: - - The receive disable string for the monitor call - required: false - default: none - ip: - description: - - IP address part of the ipport definition. The default API setting - is "0.0.0.0". - required: false - default: none - port: - description: - - port address part of the ipport definition. The default API - setting is 0. - required: false - default: none - interval: - description: - - The interval specifying how frequently the monitor instance - of this template will run. By default, this interval is used for up and - down states. The default API setting is 5. - required: false - default: none - timeout: - description: - - The number of seconds in which the node or service must respond to - the monitor request. If the target responds within the set time - period, it is considered up. If the target does not respond within - the set time period, it is considered down. You can change this - number to any number you want; however, it should be 3 times the - interval number of seconds plus 1 second. The default API setting - is 16. - required: false - default: none - time_until_up: - description: - - Specifies the amount of time in seconds after the first successful - response before a node will be marked up.
A value of 0 will cause a - node to be marked up immediately after a valid response is received - from the node. The default API setting is 0. - required: false - default: none -''' - -EXAMPLES = ''' -- name: BIGIP F5 | Create HTTP Monitor - local_action: - module: bigip_monitor_http - state: present - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ item.monitorname }}" - send: "{{ item.send }}" - receive: "{{ item.receive }}" - with_items: f5monitors -- name: BIGIP F5 | Remove HTTP Monitor - local_action: - module: bigip_monitor_http - state: absent - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ monitorname }}" -''' - -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -TEMPLATE_TYPE = 'TTYPE_HTTP' -DEFAULT_PARENT_TYPE = 'http' - - -# =========================================== -# bigip_monitor module generic methods. -# these should be re-useable for other monitor types -# - -def bigip_api(bigip, user, password): - - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - - -def check_monitor_exists(module, api, monitor, parent): - - # hack to determine if monitor exists - result = False - try: - ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0] - parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0] - if ttype == TEMPLATE_TYPE and parent == parent2: - result = True - else: - module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent)) - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = False - else: - # genuine exception - raise - return result - - -def create_monitor(api, monitor, template_attributes): - - try: - api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes]) - except bigsuds.OperationFailed, e: - if "already exists" in str(e): - return False - else: - # genuine exception - raise - return True - - -def delete_monitor(api, monitor): - - try: - api.LocalLB.Monitor.delete_template(template_names=[monitor]) - except bigsuds.OperationFailed, e: - # maybe it was deleted since we checked - if "was not found" in str(e): - return False - else: - # genuine exception - raise - return True - - -def check_string_property(api, monitor, str_property): - - try: - return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0] - except bigsuds.OperationFailed, e: - # happens in check mode if not created yet - if "was not found" in str(e): - return True - else: - # genuine exception - raise - - -def set_string_property(api, monitor, str_property): - - api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property]) - - -def check_integer_property(api, monitor, int_property): - - try: - return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0] - except bigsuds.OperationFailed, e: - # happens in check mode if not created yet - if "was not found" in str(e): - return True - else: - # genuine exception - raise - - - -def set_integer_property(api, monitor, int_property): - - api.LocalLB.Monitor.set_template_integer_property(template_names=[monitor], values=[int_property]) - - -def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties): -
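- # Reconcile the supplied (non-None) string and integer properties with the - # template's current values, writing only those that differ; in check mode - # no set calls are made, but the return value still reports pending changes.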
changed = False - for str_property in template_string_properties: - if str_property['value'] is not None and not check_string_property(api, monitor, str_property): - if not module.check_mode: - set_string_property(api, monitor, str_property) - changed = True - for int_property in template_integer_properties: - if int_property['value'] is not None and not check_integer_property(api, monitor, int_property): - if not module.check_mode: - set_integer_property(api, monitor, int_property) - changed = True - - return changed - - -def get_ipport(api, monitor): - - return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0] - - -def set_ipport(api, monitor, ipport): - - try: - api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport]) - return True, "" - - except bigsuds.OperationFailed, e: - if "Cannot modify the address type of monitor" in str(e): - return False, "Cannot modify the address type of monitor if already assigned to a pool." - else: - # genuine exception - raise - -# =========================================== -# main loop -# -# writing a module for other monitor types should -# only need an updated main() (and monitor specific functions) - -def main(): - - # begin monitor specific stuff - - module = AnsibleModule( - argument_spec = dict( - server = dict(required=True), - user = dict(required=True), - password = dict(required=True), - partition = dict(default='Common'), - state = dict(default='present', choices=['present', 'absent']), - name = dict(required=True), - parent = dict(default=DEFAULT_PARENT_TYPE), - parent_partition = dict(default='Common'), - send = dict(required=False), - receive = dict(required=False), - receive_disable = dict(required=False), - ip = dict(required=False), - port = dict(required=False, type='int'), - interval = dict(required=False, type='int'), - timeout = dict(required=False, type='int'), - time_until_up = dict(required=False, type='int', default=0) - ), - supports_check_mode=True - ) - - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - partition = module.params['partition'] - parent_partition = module.params['parent_partition'] - state = module.params['state'] - name = module.params['name'] - parent = "/%s/%s" % (parent_partition, module.params['parent']) - monitor = "/%s/%s" % (partition, name) - send = module.params['send'] - receive = module.params['receive'] - receive_disable = module.params['receive_disable'] - ip = module.params['ip'] - port = module.params['port'] - interval = module.params['interval'] - timeout = module.params['timeout'] - time_until_up = module.params['time_until_up'] - - # end monitor specific stuff - - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") - api = bigip_api(server, user, password) - monitor_exists = check_monitor_exists(module, api, monitor, parent) - - - # ipport is a special setting - if monitor_exists: # make sure to not update current settings if not asked - cur_ipport = get_ipport(api, monitor) - if ip is None: - ip = cur_ipport['ipport']['address'] - if port is None: - port = cur_ipport['ipport']['port'] - else: # use API defaults if not defined to create it - if interval is None: - interval = 5 - if timeout is None: - timeout = 16 - if ip is None: - ip = '0.0.0.0' - if port is None: - port = 0 - if send is None: - send = '' - if receive is None: - receive = '' - if receive_disable is None: - receive_disable = '' - - # define and set address type - if ip == 
'0.0.0.0' and port == 0: - address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT' - elif ip == '0.0.0.0' and port != 0: - address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT' - elif ip != '0.0.0.0' and port != 0: - address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT' - else: - address_type = 'ATYPE_UNSET' - - ipport = {'address_type': address_type, - 'ipport': {'address': ip, - 'port': port}} - - template_attributes = {'parent_template': parent, - 'interval': interval, - 'timeout': timeout, - 'dest_ipport': ipport, - 'is_read_only': False, - 'is_directly_usable': True} - - # monitor specific stuff - template_string_properties = [{'type': 'STYPE_SEND', - 'value': send}, - {'type': 'STYPE_RECEIVE', - 'value': receive}, - {'type': 'STYPE_RECEIVE_DRAIN', - 'value': receive_disable}] - - template_integer_properties = [{'type': 'ITYPE_INTERVAL', - 'value': interval}, - {'type': 'ITYPE_TIMEOUT', - 'value': timeout}, - {'type': 'ITYPE_TIME_UNTIL_UP', - 'value': time_until_up}] - - # main logic, monitor generic - - try: - result = {'changed': False} # default - - - if state == 'absent': - if monitor_exists: - if not module.check_mode: - # possible race condition if same task - # on other node deleted it first - result['changed'] |= delete_monitor(api, monitor) - else: - result['changed'] |= True - - else: # state present - ## check for monitor itself - if not monitor_exists: # create it - if not module.check_mode: - # again, check changed status here b/c race conditions - # if other task already created it - result['changed'] |= create_monitor(api, monitor, template_attributes) - else: - result['changed'] |= True - - ## check for monitor parameters - # whether it already existed, or was just created, now update - # the update functions need to check for check mode but - # cannot update settings if it doesn't exist which happens in check mode - result['changed'] |= update_monitor_properties(api, module, monitor, - template_string_properties, - template_integer_properties) - - # we just have to update the ipport if monitor already exists and it's different - if monitor_exists and cur_ipport != ipport: - set_ipport(api, monitor, ipport) - result['changed'] |= True - #else: monitor doesn't exist (check mode) or ipport is already ok - - - except Exception, e: - module.fail_json(msg="received exception: %s" % e) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/net_infrastructure/bigip_monitor_tcp b/library/net_infrastructure/bigip_monitor_tcp deleted file mode 100644 index 8b89a0c6113..00000000000 --- a/library/net_infrastructure/bigip_monitor_tcp +++ /dev/null @@ -1,489 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, serge van Ginderachter -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
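The TCP module below reuses the generic monitor helpers (check_monitor_exists, create_monitor, delete_monitor and the property check/set functions) almost verbatim from the HTTP module above. As a rough, self-contained sketch of the idempotent-create pattern those helpers implement, using only the bigsuds calls that appear in these modules (the ensure_monitor wrapper itself is hypothetical, not part of either module):

    import bigsuds

    TEMPLATE_TYPE = 'TTYPE_TCP'

    def ensure_monitor(api, monitor, template_attributes):
        # Attempt the create unconditionally; an "already exists" fault
        # (e.g. a concurrent create from another controller) is treated
        # as "no change", while any other fault is a genuine error.
        try:
            api.LocalLB.Monitor.create_template(
                templates=[{'template_name': monitor,
                            'template_type': TEMPLATE_TYPE}],
                template_attributes=[template_attributes])
            return True   # template was created
        except bigsuds.OperationFailed as e:
            if 'already exists' in str(e):
                return False  # lost the race; monitor already present
            raise

The same fault-string matching ("was not found") is what makes delete_monitor and the check-mode property probes tolerant of concurrent runs against the same device.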
- -DOCUMENTATION = ''' ---- -module: bigip_monitor_tcp -short_description: "Manages F5 BIG-IP LTM tcp monitors" -description: - - "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API" -version_added: "1.4" -author: Serge van Ginderachter -notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" - - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx" -requirements: - - bigsuds -options: - server: - description: - - BIG-IP host - required: true - default: null - user: - description: - - BIG-IP username - required: true - default: null - password: - description: - - BIG-IP password - required: true - default: null - state: - description: - - Monitor state - required: false - default: 'present' - choices: ['present', 'absent'] - name: - description: - - Monitor name - required: true - default: null - aliases: ['monitor'] - partition: - description: - - Partition for the monitor - required: false - default: 'Common' - type: - description: - - The template type of this monitor template - required: false - default: 'tcp' - choices: [ 'tcp', 'tcp_echo', 'tcp_half_open'] - parent: - description: - - The parent template of this monitor template - required: false - default: 'tcp' - choices: [ 'tcp', 'tcp_echo', 'tcp_half_open'] - parent_partition: - description: - - Partition for the parent monitor - required: false - default: 'Common' - send: - description: - - The send string for the monitor call - required: false - default: none - receive: - description: - - The receive string for the monitor call - required: false - default: none - ip: - description: - - IP address part of the ipport definition. The default API setting - is "0.0.0.0". - required: false - default: none - port: - description: - - port address part of the ipport definition. The default API - setting is 0. - required: false - default: none - interval: - description: - - The interval specifying how frequently the monitor instance - of this template will run. By default, this interval is used for up and - down states. The default API setting is 5. - required: false - default: none - timeout: - description: - - The number of seconds in which the node or service must respond to - the monitor request. If the target responds within the set time - period, it is considered up. If the target does not respond within - the set time period, it is considered down. You can change this - number, but it should be 3 times the - interval number of seconds plus 1 second. The default API setting - is 16. - required: false - default: none - time_until_up: - description: - - Specifies the amount of time in seconds after the first successful - response before a node will be marked up. A value of 0 will cause a - node to be marked up immediately after a valid response is received - from the node. The default API setting is 0.
- required: false - default: none -''' - -EXAMPLES = ''' - -- name: BIGIP F5 | Create TCP Monitor - local_action: - module: bigip_monitor_tcp - state: present - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ item.monitorname }}" - type: tcp - send: "{{ item.send }}" - receive: "{{ item.receive }}" - with_items: f5monitors-tcp -- name: BIGIP F5 | Create TCP half open Monitor - local_action: - module: bigip_monitor_tcp - state: present - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ item.monitorname }}" - type: tcp_half_open - send: "{{ item.send }}" - receive: "{{ item.receive }}" - with_items: f5monitors-halftcp -- name: BIGIP F5 | Remove TCP Monitor - local_action: - module: bigip_monitor_tcp - state: absent - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ monitorname }}" - with_flattened: - - f5monitors-tcp - - f5monitors-halftcp - -''' - -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP' -TEMPLATE_TYPE_CHOICES = ['tcp', 'tcp_echo', 'tcp_half_open'] -DEFAULT_PARENT = DEFAULT_TEMPLATE_TYPE_CHOICE = DEFAULT_TEMPLATE_TYPE.replace('TTYPE_', '').lower() - - -# =========================================== -# bigip_monitor module generic methods. -# these should be re-usable for other monitor types -# - -def bigip_api(bigip, user, password): - - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - - -def check_monitor_exists(module, api, monitor, parent): - - # hack to determine if monitor exists - result = False - try: - ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0] - parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0] - if ttype == TEMPLATE_TYPE and parent == parent2: - result = True - else: - module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent (%s)' % (ttype, parent)) - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = False - else: - # genuine exception - raise - return result - - -def create_monitor(api, monitor, template_attributes): - - try: - api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes]) - except bigsuds.OperationFailed, e: - if "already exists" in str(e): - return False - else: - # genuine exception - raise - return True - - -def delete_monitor(api, monitor): - - try: - api.LocalLB.Monitor.delete_template(template_names=[monitor]) - except bigsuds.OperationFailed, e: - # maybe it was deleted since we checked - if "was not found" in str(e): - return False - else: - # genuine exception - raise - return True - - -def check_string_property(api, monitor, str_property): - - try: - return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0] - except bigsuds.OperationFailed, e: - # happens in check mode if not created yet - if "was not found" in str(e): - return True - else: - # genuine exception - raise - return True - - -def set_string_property(api, monitor, str_property): - - api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property]) - - -def check_integer_property(api, monitor, int_property): - - try: - return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0] - except 
bigsuds.OperationFailed, e: - # happens in check mode if not created yet - if "was not found" in str(e): - return True - else: - # genuine exception - raise - return True - - -def set_integer_property(api, monitor, int_property): - - api.LocalLB.Monitor.set_template_int_property(template_names=[monitor], values=[int_property]) - - -def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties): - - changed = False - for str_property in template_string_properties: - if str_property['value'] is not None and not check_string_property(api, monitor, str_property): - if not module.check_mode: - set_string_property(api, monitor, str_property) - changed = True - for int_property in template_integer_properties: - if int_property['value'] is not None and not check_integer_property(api, monitor, int_property): - if not module.check_mode: - set_integer_property(api, monitor, int_property) - changed = True - - return changed - - -def get_ipport(api, monitor): - - return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0] - - -def set_ipport(api, monitor, ipport): - - try: - api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport]) - return True, "" - - except bigsuds.OperationFailed, e: - if "Cannot modify the address type of monitor" in str(e): - return False, "Cannot modify the address type of monitor if already assigned to a pool." - else: - # genuine exception - raise - -# =========================================== -# main loop -# -# writing a module for other monitor types should -# only need an updated main() (and monitor specific functions) - -def main(): - - # begin monitor specific stuff - - module = AnsibleModule( - argument_spec = dict( - server = dict(required=True), - user = dict(required=True), - password = dict(required=True), - partition = dict(default='Common'), - state = dict(default='present', choices=['present', 'absent']), - name = dict(required=True), - type = dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES), - parent = dict(default=DEFAULT_PARENT), - parent_partition = dict(default='Common'), - send = dict(required=False), - receive = dict(required=False), - ip = dict(required=False), - port = dict(required=False, type='int'), - interval = dict(required=False, type='int'), - timeout = dict(required=False, type='int'), - time_until_up = dict(required=False, type='int', default=0) - ), - supports_check_mode=True - ) - - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - partition = module.params['partition'] - parent_partition = module.params['parent_partition'] - state = module.params['state'] - name = module.params['name'] - type = 'TTYPE_' + module.params['type'].upper() - parent = "/%s/%s" % (parent_partition, module.params['parent']) - monitor = "/%s/%s" % (partition, name) - send = module.params['send'] - receive = module.params['receive'] - ip = module.params['ip'] - port = module.params['port'] - interval = module.params['interval'] - timeout = module.params['timeout'] - time_until_up = module.params['time_until_up'] - - # tcp monitor has multiple types, so overrule - global TEMPLATE_TYPE - TEMPLATE_TYPE = type - - # end monitor specific stuff - - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") - api = bigip_api(server, user, password) - monitor_exists = check_monitor_exists(module, api, monitor, parent) - - - # ipport is a special setting - if 
monitor_exists: # make sure to not update current settings if not asked - cur_ipport = get_ipport(api, monitor) - if ip is None: - ip = cur_ipport['ipport']['address'] - if port is None: - port = cur_ipport['ipport']['port'] - else: # use API defaults if not defined to create it - if interval is None: - interval = 5 - if timeout is None: - timeout = 16 - if ip is None: - ip = '0.0.0.0' - if port is None: - port = 0 - if send is None: - send = '' - if receive is None: - receive = '' - - # define and set address type - if ip == '0.0.0.0' and port == 0: - address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT' - elif ip == '0.0.0.0' and port != 0: - address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT' - elif ip != '0.0.0.0' and port != 0: - address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT' - else: - address_type = 'ATYPE_UNSET' - - ipport = {'address_type': address_type, - 'ipport': {'address': ip, - 'port': port}} - - template_attributes = {'parent_template': parent, - 'interval': interval, - 'timeout': timeout, - 'dest_ipport': ipport, - 'is_read_only': False, - 'is_directly_usable': True} - - # monitor specific stuff - if type == 'TTYPE_TCP': - template_string_properties = [{'type': 'STYPE_SEND', - 'value': send}, - {'type': 'STYPE_RECEIVE', - 'value': receive}] - else: - template_string_properties = [] - - template_integer_properties = [{'type': 'ITYPE_INTERVAL', - 'value': interval}, - {'type': 'ITYPE_TIMEOUT', - 'value': timeout}, - {'type': 'ITYPE_TIME_UNTIL_UP', - 'value': time_until_up}] - - # main logic, monitor generic - - try: - result = {'changed': False} # default - - - if state == 'absent': - if monitor_exists: - if not module.check_mode: - # possible race condition if same task - # on other node deleted it first - result['changed'] |= delete_monitor(api, monitor) - else: - result['changed'] |= True - - else: # state present - ## check for monitor itself - if not monitor_exists: # create it - if not module.check_mode: - # again, check changed status here b/c race conditions - # if other task already created it - result['changed'] |= create_monitor(api, monitor, template_attributes) - else: - result['changed'] |= True - - ## check for monitor parameters - # whether it already existed, or was just created, now update - # the update functions need to check for check mode but - # cannot update settings if it doesn't exist which happens in check mode - if monitor_exists and not module.check_mode: - result['changed'] |= update_monitor_properties(api, module, monitor, - template_string_properties, - template_integer_properties) - # else assume nothing changed - - # we just have to update the ipport if monitor already exists and it's different - if monitor_exists and cur_ipport != ipport: - set_ipport(api, monitor, ipport) - result['changed'] |= True - #else: monitor doesn't exist (check mode) or ipport is already ok - - - except Exception, e: - module.fail_json(msg="received exception: %s" % e) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/net_infrastructure/bigip_node b/library/net_infrastructure/bigip_node deleted file mode 100644 index 68b6a2b52f1..00000000000 --- a/library/net_infrastructure/bigip_node +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Matt Hite -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, 
either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: bigip_node -short_description: "Manages F5 BIG-IP LTM nodes" -description: - - "Manages F5 BIG-IP LTM nodes via iControl SOAP API" -version_added: "1.4" -author: Matt Hite -notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" -requirements: - - bigsuds -options: - server: - description: - - BIG-IP host - required: true - default: null - choices: [] - aliases: [] - user: - description: - - BIG-IP username - required: true - default: null - choices: [] - aliases: [] - password: - description: - - BIG-IP password - required: true - default: null - choices: [] - aliases: [] - state: - description: - - Node state - required: false - default: present - choices: ['present', 'absent'] - aliases: [] - partition: - description: - - Partition - required: false - default: 'Common' - choices: [] - aliases: [] - name: - description: - - "Node name" - required: true - default: null - choices: [] - host: - description: - - "Node IP. Required when state=present and the node does not exist. Must not be specified when state=absent." - required: false - default: null - choices: [] - aliases: ['address', 'ip'] - description: - description: - - "Node description." - required: false - default: null - choices: [] -''' - -EXAMPLES = ''' - -## playbook task examples: - ---- -# file bigip-test.yml -# ... -- hosts: bigip-test - tasks: - - name: Add node - local_action: > - bigip_node - server=lb.mydomain.com - user=admin - password=mysecret - state=present - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - name="{{ ansible_default_ipv4["address"] }}" - -# Note that the BIG-IP automatically names the node using the -# IP address specified in previous play's host parameter. -# Future plays referencing this node no longer use the host -# parameter but instead use the name parameter. -# Alternatively, you could have specified a name with the -# name parameter when state=present. 
- - - name: Modify node description - local_action: > - bigip_node - server=lb.mydomain.com - user=admin - password=mysecret - state=present - partition=matthite - name="{{ ansible_default_ipv4["address"] }}" - description="Our best server yet" - - - name: Delete node - local_action: > - bigip_node - server=lb.mydomain.com - user=admin - password=mysecret - state=absent - partition=matthite - name="{{ ansible_default_ipv4["address"] }}" - -''' - -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -# ========================== -# bigip_node module specific -# - -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - -def node_exists(api, address): - # hack to determine if node exists - result = False - try: - api.LocalLB.NodeAddressV2.get_object_status(nodes=[address]) - result = True - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = False - else: - # genuine exception - raise - return result - -def create_node_address(api, address, name): - try: - api.LocalLB.NodeAddressV2.create(nodes=[name], addresses=[address], limits=[0]) - result = True - desc = "" - except bigsuds.OperationFailed, e: - if "already exists" in str(e): - result = False - desc = "referenced name or IP already in use" - else: - # genuine exception - raise - return (result, desc) - -def get_node_address(api, name): - return api.LocalLB.NodeAddressV2.get_address(nodes=[name])[0] - -def delete_node_address(api, address): - try: - api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address]) - result = True - desc = "" - except bigsuds.OperationFailed, e: - if "is referenced by a member of pool" in str(e): - result = False - desc = "node referenced by pool" - else: - # genuine exception - raise - return (result, desc) - -def set_node_description(api, name, description): - api.LocalLB.NodeAddressV2.set_description(nodes=[name], - descriptions=[description]) - -def get_node_description(api, name): - return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0] - -def main(): - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - state = dict(type='str', default='present', choices=['present', 'absent']), - partition = dict(type='str', default='Common'), - name = dict(type='str', required=True), - host = dict(type='str', aliases=['address', 'ip']), - description = dict(type='str') - ), - supports_check_mode=True - ) - - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") - - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - state = module.params['state'] - partition = module.params['partition'] - host = module.params['host'] - name = module.params['name'] - address = "/%s/%s" % (partition, name) - description = module.params['description'] - - if state == 'absent' and host is not None: - module.fail_json(msg="host parameter invalid when state=absent") - - try: - api = bigip_api(server, user, password) - result = {'changed': False} # default - - if state == 'absent': - if node_exists(api, address): - if not module.check_mode: - deleted, desc = delete_node_address(api, address) - if not deleted: - module.fail_json(msg="unable to delete: %s" % desc) - else: - result = {'changed': True} - else: - # check-mode return value - result = {'changed': True} - - elif state == 
'present': - if not node_exists(api, address): - if host is None: - module.fail_json(msg="host parameter required when " \ - "state=present and node does not exist") - if not module.check_mode: - created, desc = create_node_address(api, address=host, name=address) - if not created: - module.fail_json(msg="unable to create: %s" % desc) - else: - result = {'changed': True} - if description is not None: - set_node_description(api, address, description) - result = {'changed': True} - else: - # check-mode return value - result = {'changed': True} - else: - # node exists -- potentially modify attributes - if host is not None: - if get_node_address(api, address) != host: - module.fail_json(msg="Changing the node address is " \ - "not supported by the API; " \ - "delete and recreate the node.") - if description is not None: - if get_node_description(api, address) != description: - if not module.check_mode: - set_node_description(api, address, description) - result = {'changed': True} - - except Exception, e: - module.fail_json(msg="received exception: %s" % e) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/net_infrastructure/bigip_pool b/library/net_infrastructure/bigip_pool deleted file mode 100644 index 48d03b9f1cb..00000000000 --- a/library/net_infrastructure/bigip_pool +++ /dev/null @@ -1,536 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Matt Hite -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
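# Editor's note: bigip_node above and the pool modules below share one
# idempotency skeleton. A hedged sketch of that skeleton, independent of
# bigsuds; ensure() and its callable arguments are illustrative stand-ins,
# not module API.

def ensure(state, exists, create, delete, check_mode):
    """Return True when a change was made (or would be, in check mode)."""
    if state == 'absent':
        if not exists:
            return False
        if not check_mode:
            delete()   # may race with a concurrent task; callers tolerate "was not found"
        return True
    # state == 'present'
    if exists:
        return False   # attribute updates are compared and applied separately
    if not check_mode:
        create()       # may race; callers tolerate "already exists"
    return True

# e.g., for bigip_node:
# changed = ensure(state, node_exists(api, address),
#                  lambda: create_node_address(api, address=host, name=address),
#                  lambda: delete_node_address(api, address),
#                  module.check_mode)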
- -DOCUMENTATION = ''' ---- -module: bigip_pool -short_description: "Manages F5 BIG-IP LTM pools" -description: - - "Manages F5 BIG-IP LTM pools via iControl SOAP API" -version_added: "1.2" -author: Matt Hite -notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" -requirements: - - bigsuds -options: - server: - description: - - BIG-IP host - required: true - default: null - choices: [] - aliases: [] - user: - description: - - BIG-IP username - required: true - default: null - choices: [] - aliases: [] - password: - description: - - BIG-IP password - required: true - default: null - choices: [] - aliases: [] - state: - description: - - Pool/pool member state - required: false - default: present - choices: ['present', 'absent'] - aliases: [] - name: - description: - - Pool name - required: true - default: null - choices: [] - aliases: ['pool'] - partition: - description: - - Partition of pool/pool member - required: false - default: 'Common' - choices: [] - aliases: [] - lb_method: - description: - - Load balancing method - version_added: "1.3" - required: False - default: 'round_robin' - choices: ['round_robin', 'ratio_member', 'least_connection_member', - 'observed_member', 'predictive_member', 'ratio_node_address', - 'least_connection_node_address', 'fastest_node_address', - 'observed_node_address', 'predictive_node_address', - 'dynamic_ratio', 'fastest_app_response', 'least_sessions', - 'dynamic_ratio_member', 'l3_addr', 'unknown', - 'weighted_least_connection_member', - 'weighted_least_connection_node_address', - 'ratio_session', 'ratio_least_connection_member', - 'ratio_least_connection_node_address'] - aliases: [] - monitor_type: - description: - - Monitor rule type when monitors > 1 - version_added: "1.3" - required: False - default: null - choices: ['and_list', 'm_of_n'] - aliases: [] - quorum: - description: - - Monitor quorum value when monitor_type is m_of_n - version_added: "1.3" - required: False - default: null - choices: [] - aliases: [] - monitors: - description: - - Monitor template name list. Always use the full path to the monitor. - version_added: "1.3" - required: False - default: null - choices: [] - aliases: [] - slow_ramp_time: - description: - - Sets the ramp-up time (in seconds) to gradually ramp up the load on newly added or freshly detected up pool members - version_added: "1.3" - required: False - default: null - choices: [] - aliases: [] - service_down_action: - description: - - Sets the action to take when node goes down in pool - version_added: "1.3" - required: False - default: null - choices: ['none', 'reset', 'drop', 'reselect'] - aliases: [] - host: - description: - - "Pool member IP" - required: False - default: null - choices: [] - aliases: ['address'] - port: - description: - - "Pool member port" - required: False - default: null - choices: [] - aliases: [] -''' - -EXAMPLES = ''' - -## playbook task examples: - ---- -# file bigip-test.yml -# ... 
-- hosts: localhost - tasks: - - name: Create pool - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=present - name=matthite-pool - partition=matthite - lb_method=least_connection_member - slow_ramp_time=120 - - - name: Modify load balancer method - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=present - name=matthite-pool - partition=matthite - lb_method=round_robin - -- hosts: bigip-test - tasks: - - name: Add pool member - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=present - name=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - - - name: Remove pool member from pool - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=absent - name=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - -- hosts: localhost - tasks: - - name: Delete pool - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=absent - name=matthite-pool - partition=matthite - -''' - -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -# =========================================== -# bigip_pool module specific support methods. -# - -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - -def pool_exists(api, pool): - # hack to determine if pool exists - result = False - try: - api.LocalLB.Pool.get_object_status(pool_names=[pool]) - result = True - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = False - else: - # genuine exception - raise - return result - -def create_pool(api, pool, lb_method): - # create requires lb_method but we don't want to default - # to a value on subsequent runs - if not lb_method: - lb_method = 'round_robin' - lb_method = "LB_METHOD_%s" % lb_method.strip().upper() - api.LocalLB.Pool.create_v2(pool_names=[pool], lb_methods=[lb_method], - members=[[]]) - -def remove_pool(api, pool): - api.LocalLB.Pool.delete_pool(pool_names=[pool]) - -def get_lb_method(api, pool): - lb_method = api.LocalLB.Pool.get_lb_method(pool_names=[pool])[0] - lb_method = lb_method.strip().replace('LB_METHOD_', '').lower() - return lb_method - -def set_lb_method(api, pool, lb_method): - lb_method = "LB_METHOD_%s" % lb_method.strip().upper() - api.LocalLB.Pool.set_lb_method(pool_names=[pool], lb_methods=[lb_method]) - -def get_monitors(api, pool): - result = api.LocalLB.Pool.get_monitor_association(pool_names=[pool])[0]['monitor_rule'] - monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower() - quorum = result['quorum'] - monitor_templates = result['monitor_templates'] - return (monitor_type, quorum, monitor_templates) - -def set_monitors(api, pool, monitor_type, quorum, monitor_templates): - monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper() - monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates} - monitor_association = {'pool_name': pool, 'monitor_rule': monitor_rule} - api.LocalLB.Pool.set_monitor_association(monitor_associations=[monitor_association]) - -def get_slow_ramp_time(api, pool): - result = api.LocalLB.Pool.get_slow_ramp_time(pool_names=[pool])[0] - return result - -def set_slow_ramp_time(api, pool, seconds): - api.LocalLB.Pool.set_slow_ramp_time(pool_names=[pool], values=[seconds]) - 
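# Editor's note: the getters and setters in this file all translate between
# the module's lowercase vocabulary and iControl's prefixed enum names
# (LB_METHOD_*, MONITOR_RULE_TYPE_*, SERVICE_DOWN_ACTION_*). A tiny sketch
# of that round-trip; the helper names are illustrative, the module inlines
# this logic.

def to_icontrol_enum(prefix, value):
    return "%s%s" % (prefix, value.strip().upper())

def from_icontrol_enum(prefix, value):
    return value.split(prefix)[-1].lower()

assert to_icontrol_enum('LB_METHOD_', 'round_robin') == 'LB_METHOD_ROUND_ROBIN'
assert from_icontrol_enum('LB_METHOD_', 'LB_METHOD_ROUND_ROBIN') == 'round_robin'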
-def get_action_on_service_down(api, pool): - result = api.LocalLB.Pool.get_action_on_service_down(pool_names=[pool])[0] - result = result.split("SERVICE_DOWN_ACTION_")[-1].lower() - return result - -def set_action_on_service_down(api, pool, action): - action = "SERVICE_DOWN_ACTION_%s" % action.strip().upper() - api.LocalLB.Pool.set_action_on_service_down(pool_names=[pool], actions=[action]) - -def member_exists(api, pool, address, port): - # hack to determine if member exists - result = False - try: - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.get_member_object_status(pool_names=[pool], - members=[members]) - result = True - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = False - else: - # genuine exception - raise - return result - -def delete_node_address(api, address): - result = False - try: - api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address]) - result = True - except bigsuds.OperationFailed, e: - if "is referenced by a member of pool" in str(e): - result = False - else: - # genuine exception - raise - return result - -def remove_pool_member(api, pool, address, port): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members]) - -def add_pool_member(api, pool, address, port): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members]) - -def main(): - lb_method_choices = ['round_robin', 'ratio_member', - 'least_connection_member', 'observed_member', - 'predictive_member', 'ratio_node_address', - 'least_connection_node_address', - 'fastest_node_address', 'observed_node_address', - 'predictive_node_address', 'dynamic_ratio', - 'fastest_app_response', 'least_sessions', - 'dynamic_ratio_member', 'l3_addr', 'unknown', - 'weighted_least_connection_member', - 'weighted_least_connection_node_address', - 'ratio_session', 'ratio_least_connection_member', - 'ratio_least_connection_node_address'] - - monitor_type_choices = ['and_list', 'm_of_n'] - - service_down_choices = ['none', 'reset', 'drop', 'reselect'] - - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - state = dict(type='str', default='present', choices=['present', 'absent']), - name = dict(type='str', required=True, aliases=['pool']), - partition = dict(type='str', default='Common'), - lb_method = dict(type='str', choices=lb_method_choices), - monitor_type = dict(type='str', choices=monitor_type_choices), - quorum = dict(type='int'), - monitors = dict(type='list'), - slow_ramp_time = dict(type='int'), - service_down_action = dict(type='str', choices=service_down_choices), - host = dict(type='str', aliases=['address']), - port = dict(type='int') - ), - supports_check_mode=True - ) - - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") - - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - state = module.params['state'] - name = module.params['name'] - partition = module.params['partition'] - pool = "/%s/%s" % (partition, name) - lb_method = module.params['lb_method'] - if lb_method: - lb_method = lb_method.lower() - monitor_type = module.params['monitor_type'] - if monitor_type: - monitor_type = monitor_type.lower() - quorum = module.params['quorum'] - monitors = module.params['monitors'] - if monitors: - monitors = [] - for monitor in 
module.params['monitors']: - if "/" not in monitor: - monitors.append("/%s/%s" % (partition, monitor)) - else: - monitors.append(monitor) - slow_ramp_time = module.params['slow_ramp_time'] - service_down_action = module.params['service_down_action'] - if service_down_action: - service_down_action = service_down_action.lower() - host = module.params['host'] - address = "/%s/%s" % (partition, host) - port = module.params['port'] - - # sanity check user supplied values - - if (host and not port) or (port and not host): - module.fail_json(msg="both host and port must be supplied") - - if port is not None and (port < 1 or port > 65535): - module.fail_json(msg="valid ports must be in range 1 - 65535") - - if monitors: - if len(monitors) == 1: - # set default required values for single monitor - quorum = 0 - monitor_type = 'single' - elif len(monitors) > 1: - if not monitor_type: - module.fail_json(msg="monitor_type required for monitors > 1") - if monitor_type == 'm_of_n' and not quorum: - module.fail_json(msg="quorum value required for monitor_type m_of_n") - if monitor_type != 'm_of_n': - quorum = 0 - elif monitor_type: - # no monitors specified but monitor_type exists - module.fail_json(msg="monitor_type requires the monitors parameter") - elif quorum is not None: - # no monitors specified but quorum exists - module.fail_json(msg="quorum requires the monitors parameter") - - try: - api = bigip_api(server, user, password) - result = {'changed': False} # default - - if state == 'absent': - if host and port and pool: - # member removal takes precedence - if pool_exists(api, pool) and member_exists(api, pool, address, port): - if not module.check_mode: - remove_pool_member(api, pool, address, port) - deleted = delete_node_address(api, address) - result = {'changed': True, 'deleted': deleted} - else: - result = {'changed': True} - elif pool_exists(api, pool): - # no host/port supplied, must be pool removal - if not module.check_mode: - # hack to handle concurrent runs of module - # pool might be gone before we actually remove it - try: - remove_pool(api, pool) - result = {'changed': True} - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = {'changed': False} - else: - # genuine exception - raise - else: - # check-mode return value - result = {'changed': True} - - elif state == 'present': - update = False - if not pool_exists(api, pool): - # pool does not exist -- need to create it - if not module.check_mode: - # a bit of a hack to handle concurrent runs of this module. - # even though we've checked the pool doesn't exist, - # it may exist by the time we run create_pool(). - # this catches the exception and does something smart - # about it! 
- try: - create_pool(api, pool, lb_method) - result = {'changed': True} - except bigsuds.OperationFailed, e: - if "already exists" in str(e): - update = True - else: - # genuine exception - raise - else: - if monitors: - set_monitors(api, pool, monitor_type, quorum, monitors) - if slow_ramp_time: - set_slow_ramp_time(api, pool, slow_ramp_time) - if service_down_action: - set_action_on_service_down(api, pool, service_down_action) - if host and port: - add_pool_member(api, pool, address, port) - else: - # check-mode return value - result = {'changed': True} - else: - # pool exists -- potentially modify attributes - update = True - - if update: - if lb_method and lb_method != get_lb_method(api, pool): - if not module.check_mode: - set_lb_method(api, pool, lb_method) - result = {'changed': True} - if monitors: - t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, pool) - if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)): - if not module.check_mode: - set_monitors(api, pool, monitor_type, quorum, monitors) - result = {'changed': True} - if slow_ramp_time and slow_ramp_time != get_slow_ramp_time(api, pool): - if not module.check_mode: - set_slow_ramp_time(api, pool, slow_ramp_time) - result = {'changed': True} - if service_down_action and service_down_action != get_action_on_service_down(api, pool): - if not module.check_mode: - set_action_on_service_down(api, pool, service_down_action) - result = {'changed': True} - if (host and port) and not member_exists(api, pool, address, port): - if not module.check_mode: - add_pool_member(api, pool, address, port) - result = {'changed': True} - - except Exception, e: - module.fail_json(msg="received exception: %s" % e) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/net_infrastructure/bigip_pool_member b/library/net_infrastructure/bigip_pool_member deleted file mode 100644 index 5aef9f0ae98..00000000000 --- a/library/net_infrastructure/bigip_pool_member +++ /dev/null @@ -1,378 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Matt Hite -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
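# Editor's note: the monitor sanity checks in bigip_pool's main() above
# encode a small rule table. The same rules as a standalone sketch;
# validate_monitors is an illustrative name, and it raises ValueError where
# the module calls module.fail_json().

def validate_monitors(monitors, monitor_type, quorum):
    """Return the effective (monitor_type, quorum) for a monitor list."""
    if monitors:
        if len(monitors) == 1:
            return 'single', 0   # defaults for a lone monitor
        if not monitor_type:
            raise ValueError("monitor_type required for monitors > 1")
        if monitor_type == 'm_of_n' and not quorum:
            raise ValueError("quorum value required for monitor_type m_of_n")
        if monitor_type != 'm_of_n':
            quorum = 0
        return monitor_type, quorum
    if monitor_type:
        raise ValueError("monitor_type requires the monitors parameter")
    if quorum is not None:
        raise ValueError("quorum requires the monitors parameter")
    return monitor_type, quorum

assert validate_monitors(['/Common/tcp'], None, None) == ('single', 0)
assert validate_monitors(['/Common/tcp', '/Common/http'], 'm_of_n', 1) == ('m_of_n', 1)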
- -DOCUMENTATION = ''' ---- -module: bigip_pool_member -short_description: "Manages F5 BIG-IP LTM pool members" -description: - - "Manages F5 BIG-IP LTM pool members via iControl SOAP API" -version_added: "1.4" -author: Matt Hite -notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" - - "Supersedes bigip_pool for managing pool members" - -requirements: - - bigsuds -options: - server: - description: - - BIG-IP host - required: true - default: null - choices: [] - aliases: [] - user: - description: - - BIG-IP username - required: true - default: null - choices: [] - aliases: [] - password: - description: - - BIG-IP password - required: true - default: null - choices: [] - aliases: [] - state: - description: - - Pool member state - required: false - default: present - choices: ['present', 'absent'] - aliases: [] - pool: - description: - - Pool name. This pool must exist. - required: true - default: null - choices: [] - aliases: [] - partition: - description: - - Partition - required: false - default: 'Common' - choices: [] - aliases: [] - host: - description: - - Pool member IP - required: true - default: null - choices: [] - aliases: ['address', 'name'] - port: - description: - - Pool member port - required: true - default: null - choices: [] - aliases: [] - connection_limit: - description: - - Pool member connection limit. Setting this to 0 disables the limit. - required: false - default: null - choices: [] - aliases: [] - description: - description: - - Pool member description - required: false - default: null - choices: [] - aliases: [] - rate_limit: - description: - - Pool member rate limit (connections-per-second). Setting this to 0 disables the limit. - required: false - default: null - choices: [] - aliases: [] - ratio: - description: - - Pool member ratio weight. Valid values range from 1 through 100. New pool members -- unless overridden with this value -- default to 1. - required: false - default: null - choices: [] - aliases: [] -''' - -EXAMPLES = ''' - -## playbook task examples: - ---- -# file bigip-test.yml -# ... -- hosts: bigip-test - tasks: - - name: Add pool member - local_action: > - bigip_pool_member - server=lb.mydomain.com - user=admin - password=mysecret - state=present - pool=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - description="web server" - connection_limit=100 - rate_limit=50 - ratio=2 - - - name: Modify pool member ratio and description - local_action: > - bigip_pool_member - server=lb.mydomain.com - user=admin - password=mysecret - state=present - pool=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - ratio=1 - description="nginx server" - - - name: Remove pool member from pool - local_action: > - bigip_pool_member - server=lb.mydomain.com - user=admin - password=mysecret - state=absent - pool=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - -''' - -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -# =========================================== -# bigip_pool_member module specific support methods. 
-# - -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - -def pool_exists(api, pool): - # hack to determine if pool exists - result = False - try: - api.LocalLB.Pool.get_object_status(pool_names=[pool]) - result = True - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = False - else: - # genuine exception - raise - return result - -def member_exists(api, pool, address, port): - # hack to determine if member exists - result = False - try: - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.get_member_object_status(pool_names=[pool], - members=[members]) - result = True - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = False - else: - # genuine exception - raise - return result - -def delete_node_address(api, address): - result = False - try: - api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address]) - result = True - except bigsuds.OperationFailed, e: - if "is referenced by a member of pool" in str(e): - result = False - else: - # genuine exception - raise - return result - -def remove_pool_member(api, pool, address, port): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members]) - -def add_pool_member(api, pool, address, port): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members]) - -def get_connection_limit(api, pool, address, port): - members = [{'address': address, 'port': port}] - result = api.LocalLB.Pool.get_member_connection_limit(pool_names=[pool], members=[members])[0][0] - return result - -def set_connection_limit(api, pool, address, port, limit): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.set_member_connection_limit(pool_names=[pool], members=[members], limits=[[limit]]) - -def get_description(api, pool, address, port): - members = [{'address': address, 'port': port}] - result = api.LocalLB.Pool.get_member_description(pool_names=[pool], members=[members])[0][0] - return result - -def set_description(api, pool, address, port, description): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.set_member_description(pool_names=[pool], members=[members], descriptions=[[description]]) - -def get_rate_limit(api, pool, address, port): - members = [{'address': address, 'port': port}] - result = api.LocalLB.Pool.get_member_rate_limit(pool_names=[pool], members=[members])[0][0] - return result - -def set_rate_limit(api, pool, address, port, limit): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.set_member_rate_limit(pool_names=[pool], members=[members], limits=[[limit]]) - -def get_ratio(api, pool, address, port): - members = [{'address': address, 'port': port}] - result = api.LocalLB.Pool.get_member_ratio(pool_names=[pool], members=[members])[0][0] - return result - -def set_ratio(api, pool, address, port, ratio): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.set_member_ratio(pool_names=[pool], members=[members], ratios=[[ratio]]) - -def main(): - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - state = dict(type='str', default='present', choices=['present', 'absent']), - pool = dict(type='str', required=True), - partition = dict(type='str', default='Common'), - host = dict(type='str', 
required=True, aliases=['address', 'name']), - port = dict(type='int', required=True), - connection_limit = dict(type='int'), - description = dict(type='str'), - rate_limit = dict(type='int'), - ratio = dict(type='int') - ), - supports_check_mode=True - ) - - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") - - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - state = module.params['state'] - partition = module.params['partition'] - pool = "/%s/%s" % (partition, module.params['pool']) - connection_limit = module.params['connection_limit'] - description = module.params['description'] - rate_limit = module.params['rate_limit'] - ratio = module.params['ratio'] - host = module.params['host'] - address = "/%s/%s" % (partition, host) - port = module.params['port'] - - # sanity check user supplied values - - if (host and not port) or (port and not host): - module.fail_json(msg="both host and port must be supplied") - - if port < 1 or port > 65535: - module.fail_json(msg="valid ports must be in range 1 - 65535") - - try: - api = bigip_api(server, user, password) - if not pool_exists(api, pool): - module.fail_json(msg="pool %s does not exist" % pool) - result = {'changed': False} # default - - if state == 'absent': - if member_exists(api, pool, address, port): - if not module.check_mode: - remove_pool_member(api, pool, address, port) - deleted = delete_node_address(api, address) - result = {'changed': True, 'deleted': deleted} - else: - result = {'changed': True} - - elif state == 'present': - if not member_exists(api, pool, address, port): - if not module.check_mode: - add_pool_member(api, pool, address, port) - if connection_limit is not None: - set_connection_limit(api, pool, address, port, connection_limit) - if description is not None: - set_description(api, pool, address, port, description) - if rate_limit is not None: - set_rate_limit(api, pool, address, port, rate_limit) - if ratio is not None: - set_ratio(api, pool, address, port, ratio) - result = {'changed': True} - else: - # pool member exists -- potentially modify attributes - if connection_limit is not None and connection_limit != get_connection_limit(api, pool, address, port): - if not module.check_mode: - set_connection_limit(api, pool, address, port, connection_limit) - result = {'changed': True} - if description is not None and description != get_description(api, pool, address, port): - if not module.check_mode: - set_description(api, pool, address, port, description) - result = {'changed': True} - if rate_limit is not None and rate_limit != get_rate_limit(api, pool, address, port): - if not module.check_mode: - set_rate_limit(api, pool, address, port, rate_limit) - result = {'changed': True} - if ratio is not None and ratio != get_ratio(api, pool, address, port): - if not module.check_mode: - set_ratio(api, pool, address, port, ratio) - result = {'changed': True} - - except Exception, e: - module.fail_json(msg="received exception: %s" % e) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/net_infrastructure/dnsimple b/library/net_infrastructure/dnsimple deleted file mode 100755 index 19b167dee19..00000000000 --- a/library/net_infrastructure/dnsimple +++ /dev/null @@ -1,302 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published 
by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: dnsimple -version_added: "1.6" -short_description: Interface with dnsimple.com (a DNS hosting service). -description: - - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)" -options: - account_email: - description: - - "Account email. If omitted, the env variables DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN will be looked for. If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)" - required: false - default: null - - account_api_token: - description: - - Account API token. See I(account_email) for info. - required: false - default: null - - domain: - description: - - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. If omitted, a list of domains will be returned. - - If domain is present but the domain doesn't exist, it will be created. - required: false - default: null - - record: - description: - - Record to add, if blank a record for the domain will be created, supports the wildcard (*) - required: false - default: null - - record_ids: - description: - - List of records to ensure they either exist or don't exist - required: false - default: null - - type: - description: - - The type of DNS record to create - required: false - choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ] - default: null - - ttl: - description: - - The TTL to give the new record - required: false - default: 3600 (one hour) - - value: - description: - - Record value - - "Must be specified when trying to ensure a record exists" - required: false - default: null - - priority: - description: - - Record priority - required: false - default: null - - state: - description: - - whether the record should exist or not - required: false - choices: [ 'present', 'absent' ] - default: null - - solo: - description: - - Whether the record should be the only one for that record type and record name. 
Only use with state=present on a record - required: false - default: null - -requirements: [ dnsimple ] -author: Alex Coomans -''' - -EXAMPLES = ''' -# authenticate using email and API token -- local_action: dnsimple account_email=test@example.com account_api_token=dummyapitoken - -# fetch all domains -- local_action: dnsimple - register: domains - -# fetch my.com domain records -- local_action: dnsimple domain=my.com state=present - register: records - -# delete a domain -- local_action: dnsimple domain=my.com state=absent - -# create a test.my.com A record to point to 127.0.0.1 -- local_action: dnsimple domain=my.com record=test type=A value=127.0.0.1 - register: record - -# and then delete it -- local_action: dnsimple domain=my.com record_ids={{ record['id'] }} state=absent - -# create a my.com CNAME record to example.com -- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=present - -# change its ttl -- local_action: dnsimple domain=my.com record= type=CNAME value=example.com ttl=600 state=present - -# and delete the record -- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=absent - -''' - -import os -import sys -try: - from dnsimple import DNSimple - from dnsimple.dnsimple import DNSimpleException -except ImportError: - print "failed=True msg='dnsimple required for this module'" - sys.exit(1) - -def main(): - module = AnsibleModule( - argument_spec = dict( - account_email = dict(required=False), - account_api_token = dict(required=False, no_log=True), - domain = dict(required=False), - record = dict(required=False), - record_ids = dict(required=False, type='list'), - type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']), - ttl = dict(required=False, default=3600, type='int'), - value = dict(required=False), - priority = dict(required=False, type='int'), - state = dict(required=False, choices=['present', 'absent']), - solo = dict(required=False, type='bool'), - ), - required_together = [ - ['record', 'value'] - ], - supports_check_mode = True, - ) - - account_email = module.params.get('account_email') - account_api_token = module.params.get('account_api_token') - domain = module.params.get('domain') - record = module.params.get('record') - record_ids = module.params.get('record_ids') - record_type = module.params.get('type') - ttl = module.params.get('ttl') - value = module.params.get('value') - priority = module.params.get('priority') - state = module.params.get('state') - is_solo = module.params.get('solo') - - if account_email and account_api_token: - client = DNSimple(email=account_email, api_token=account_api_token) - elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'): - client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN')) - else: - client = DNSimple() - - try: - # Let's figure out what operation we want to do - - # No domain, return a list - if not domain: - domains = client.domains() - module.exit_json(changed=False, result=[d['domain'] for d in domains]) - - # Domain & No record - if domain and record is None and not record_ids: - domains = [d['domain'] for d in client.domains()] - if domain.isdigit(): - dr = next((d for d in domains if d['id'] == int(domain)), None) - else: - dr = next((d for d in domains if d['name'] == domain), None) - if state == 'present': - if dr: - module.exit_json(changed=False, result=dr) - else: - if module.check_mode: - 
module.exit_json(changed=True) - else: - module.exit_json(changed=True, result=client.add_domain(domain)['domain']) - elif state == 'absent': - if dr: - if not module.check_mode: - client.delete(domain) - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - else: - module.fail_json(msg="'%s' is an unknown value for the state argument" % state) - - # need the not none check since record could be an empty string - if domain and record is not None: - records = [r['record'] for r in client.records(str(domain))] - - if not record_type: - module.fail_json(msg="Missing the record type") - - if not value: - module.fail_json(msg="Missing the record value") - - rr = next((r for r in records if r['name'] == record and r['record_type'] == record_type and r['content'] == value), None) - - if state == 'present': - changed = False - if is_solo: - # delete any records that have the same name and record type - same_type = [r['id'] for r in records if r['name'] == record and r['record_type'] == record_type] - if rr: - same_type = [rid for rid in same_type if rid != rr['id']] - if same_type: - if not module.check_mode: - for rid in same_type: - client.delete_record(str(domain), rid) - changed = True - if rr: - # check if we need to update - if rr['ttl'] != ttl or rr['prio'] != priority: - data = {} - if ttl: data['ttl'] = ttl - if priority: data['prio'] = priority - if module.check_mode: - module.exit_json(changed=True) - else: - module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record']) - else: - module.exit_json(changed=changed, result=rr) - else: - # create it - data = { - 'name': record, - 'record_type': record_type, - 'content': value, - } - if ttl: data['ttl'] = ttl - if priority: data['prio'] = priority - if module.check_mode: - module.exit_json(changed=True) - else: - module.exit_json(changed=True, result=client.add_record(str(domain), data)['record']) - elif state == 'absent': - if rr: - if not module.check_mode: - client.delete_record(str(domain), rr['id']) - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - else: - module.fail_json(msg="'%s' is an unknown value for the state argument" % state) - - # Make sure these record_ids either all exist or none - if domain and record_ids: - current_records = [str(r['record']['id']) for r in client.records(str(domain))] - wanted_records = [str(r) for r in record_ids] - if state == 'present': - difference = list(set(wanted_records) - set(current_records)) - if difference: - module.fail_json(msg="Missing the following records: %s" % difference) - else: - module.exit_json(changed=False) - elif state == 'absent': - difference = list(set(wanted_records) & set(current_records)) - if difference: - if not module.check_mode: - for rid in difference: - client.delete_record(str(domain), rid) - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - else: - module.fail_json(msg="'%s' is an unknown value for the state argument" % state) - - except DNSimpleException, e: - module.fail_json(msg="Unable to contact DNSimple: %s" % e.message) - - module.fail_json(msg="Unknown what you wanted me to do") - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/net_infrastructure/dnsmadeeasy b/library/net_infrastructure/dnsmadeeasy deleted file mode 100644 index 148e25a5011..00000000000 --- a/library/net_infrastructure/dnsmadeeasy +++ /dev/null @@ -1,329 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is 
free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' --- -module: dnsmadeeasy -version_added: "1.3" -short_description: Interface with dnsmadeeasy.com (a DNS hosting service). -description: - - "Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or monitor/account support yet. See: U(http://www.dnsmadeeasy.com/services/rest-api/)" -options: - account_key: - description: - - Account API Key. - required: true - default: null - - account_secret: - description: - - Account Secret Key. - required: true - default: null - - domain: - description: - - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster resolution. - required: true - default: null - - record_name: - description: - - Record name to get/create/delete/update. If record_name is not specified, all records for the domain will be returned in "result" regardless of the state argument. - required: false - default: null - - record_type: - description: - - Record type. - required: false - choices: [ 'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ] - default: null - - record_value: - description: - - "Record value. HTTPRED: , MX: , NS: , PTR: , SRV: , TXT: " - - "If record_value is not specified, no changes will be made and the record will be returned in 'result' (in other words, this module can be used to fetch a record's current id, type, and ttl)" - required: false - default: null - - record_ttl: - description: - - record's "Time to live". Number of seconds the record remains cached in DNS servers. - required: false - default: 1800 - - state: - description: - - whether the record should exist or not - required: true - choices: [ 'present', 'absent' ] - default: null - - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 - -notes: - - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP. - - This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be registered and used in your playbooks.
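The NTP note above matters because, as the module code below shows, every API request is signed with an HMAC-SHA1 digest of the current HTTP date, so a skewed clock fails authentication. A minimal standalone sketch of that signing scheme (the helper name is hypothetical; the logic mirrors DME2._headers further down):

    import hashlib
    import hmac
    from time import gmtime, strftime

    def dme_headers(api_key, secret_key):
        # DNS Made Easy authenticates each request via an HMAC-SHA1 of the
        # request date, keyed with the account secret.
        now = strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
        digest = hmac.new(secret_key.encode(), now.encode(), hashlib.sha1).hexdigest()
        return {
            'x-dnsme-apiKey': api_key,
            'x-dnsme-hmac': digest,
            'x-dnsme-requestDate': now,
            'content-type': 'application/json',
        }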
- -requirements: [ urllib, urllib2, hashlib, hmac ] -author: Brice Burgess -''' - -EXAMPLES = ''' -# fetch my.com domain records -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present - register: response - -# create / ensure the presence of a record -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" record_type="A" record_value="127.0.0.1" - -# update the previously created record -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" record_value="192.168.0.1" - -# fetch a specific record -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" - register: response - -# delete a record / ensure it is absent -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=absent record_name="test" -''' - -# ============================================ -# DNSMadeEasy module specific support methods. -# - -IMPORT_ERROR = None -try: - import json - from time import strftime, gmtime - import hashlib - import hmac -except ImportError, e: - IMPORT_ERROR = str(e) - -class DME2: - - def __init__(self, apikey, secret, domain, module): - self.module = module - - self.api = apikey - self.secret = secret - self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/' - self.domain = str(domain) - self.domain_map = None # ["domain_name"] => ID - self.record_map = None # ["record_name"] => ID - self.records = None # ["record_ID"] => - - # Lookup the domain ID if passed as a domain name vs. ID - if not self.domain.isdigit(): - self.domain = self.getDomainByName(self.domain)['id'] - - self.record_url = 'dns/managed/' + str(self.domain) + '/records' - - def _headers(self): - currTime = self._get_date() - hashstring = self._create_hash(currTime) - headers = {'x-dnsme-apiKey': self.api, - 'x-dnsme-hmac': hashstring, - 'x-dnsme-requestDate': currTime, - 'content-type': 'application/json'} - return headers - - def _get_date(self): - return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()) - - def _create_hash(self, rightnow): - return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest() - - def query(self, resource, method, data=None): - url = self.baseurl + resource - if data and not isinstance(data, basestring): - data = urllib.urlencode(data) - - response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers()) - if info['status'] not in (200, 201, 204): - self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) - - try: - return json.load(response) - except Exception, e: - return {} - - def getDomain(self, domain_id): - if not self.domain_map: - self._instMap('domain') - - return self.domains.get(domain_id, False) - - def getDomainByName(self, domain_name): - if not self.domain_map: - self._instMap('domain') - - return self.getDomain(self.domain_map.get(domain_name, 0)) - - def getDomains(self): - return self.query('dns/managed', 'GET')['data'] - - def getRecord(self, record_id): - if not self.record_map: - self._instMap('record') - - return self.records.get(record_id, False) - - def getRecordByName(self, record_name): - if not self.record_map: - self._instMap('record') - - return self.getRecord(self.record_map.get(record_name, 0)) - - def getRecords(self): - return self.query(self.record_url, 'GET')['data'] - - def _instMap(self, type): - #@TODO cache this call so it's executed only once per ansible execution - map = {} - results = {} - - # iterate over 
e.g. self.getDomains() || self.getRecords() - for result in getattr(self, 'get' + type.title() + 's')(): - - map[result['name']] = result['id'] - results[result['id']] = result - - # e.g. self.domain_map || self.record_map - setattr(self, type + '_map', map) - setattr(self, type + 's', results) # e.g. self.domains || self.records - - def prepareRecord(self, data): - return json.dumps(data, separators=(',', ':')) - - def createRecord(self, data): - #@TODO update the cache w/ resultant record + id when implemented - return self.query(self.record_url, 'POST', data) - - def updateRecord(self, record_id, data): - #@TODO update the cache w/ resultant record + id when implemented - return self.query(self.record_url + '/' + str(record_id), 'PUT', data) - - def deleteRecord(self, record_id): - #@TODO remove record from the cache when implemented - return self.query(self.record_url + '/' + str(record_id), 'DELETE') - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - account_key=dict(required=True), - account_secret=dict(required=True, no_log=True), - domain=dict(required=True), - state=dict(required=True, choices=['present', 'absent']), - record_name=dict(required=False), - record_type=dict(required=False, choices=[ - 'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']), - record_value=dict(required=False), - record_ttl=dict(required=False, default=1800, type='int'), - validate_certs = dict(default='yes', type='bool'), - ), - required_together=( - ['record_value', 'record_ttl', 'record_type'] - ) - ) - - if IMPORT_ERROR: - module.fail_json(msg="Import Error: " + IMPORT_ERROR) - - DME = DME2(module.params["account_key"], module.params[ - "account_secret"], module.params["domain"], module) - state = module.params["state"] - record_name = module.params["record_name"] - - # Follow Keyword Controlled Behavior - if not record_name: - domain_records = DME.getRecords() - if not domain_records: - module.fail_json( - msg="The requested domain name is not accessible with this api_key; try using its ID if known.") - module.exit_json(changed=False, result=domain_records) - - # Fetch existing record + Build new one - current_record = DME.getRecordByName(record_name) - new_record = {'name': record_name} - for i in ["record_value", "record_type", "record_ttl"]: - if module.params[i]: - new_record[i[len("record_"):]] = module.params[i] - - # Compare new record against existing one - changed = False - if current_record: - for i in new_record: - if str(current_record[i]) != str(new_record[i]): - changed = True - new_record['id'] = str(current_record['id']) - - # Follow Keyword Controlled Behavior - if state == 'present': - # return the record if no value is specified - if "value" not in new_record: - if not current_record: - module.fail_json( - msg="A record with name '%s' does not exist for domain '%s'." % (record_name, module.params['domain'])) - module.exit_json(changed=False, result=current_record) - - # create record as it does not exist - if not current_record: - record = DME.createRecord(DME.prepareRecord(new_record)) - module.exit_json(changed=True, result=record) - - # update the record - if changed: - DME.updateRecord( - current_record['id'], DME.prepareRecord(new_record)) - module.exit_json(changed=True, result=new_record) - - # return the record (no changes) - module.exit_json(changed=False, result=current_record) - - elif state == 'absent': - # delete the record if it exists - if current_record: - 
DME.deleteRecord(current_record['id']) - module.exit_json(changed=True) - - # record does not exist, return w/o change. - module.exit_json(changed=False) - - else: - module.fail_json( - msg="'%s' is an unknown value for the state argument" % state) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() diff --git a/library/net_infrastructure/lldp b/library/net_infrastructure/lldp deleted file mode 100755 index 6b8836852f6..00000000000 --- a/library/net_infrastructure/lldp +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/python -tt -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import subprocess - -DOCUMENTATION = ''' --- -module: lldp -version_added: 1.6 -short_description: get details reported by lldp -description: - - Reads data out of lldpctl -options: {} -author: Andy Hill -notes: - - Requires lldpd running and lldp enabled on switches -''' - -EXAMPLES = ''' -# Retrieve switch/port information - - name: Gather information from lldp - lldp: - - - name: Print each switch/port - debug: msg="{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifalias'] }}" - with_items: lldp.keys() - -# TASK: [Print each switch/port] *********************************************************** -# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"} -# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"} -# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"} - -''' - -def gather_lldp(): - cmd = ['lldpctl', '-f', 'keyvalue'] - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) - (output, err) = proc.communicate() - if output: - output_dict = {} - lldp_entries = output.split("\n") - - for entry in lldp_entries: - if entry: - path, value = entry.strip().split("=", 1) - path = path.split(".") - path_components, final = path[:-1], path[-1] - - current_dict = output_dict - for path_component in path_components: - current_dict[path_component] = current_dict.get(path_component, {}) - current_dict = current_dict[path_component] - current_dict[final] = value - return output_dict - - -def main(): - module = AnsibleModule({}) - - lldp_output = gather_lldp() - try: - data = {'lldp': lldp_output['lldp']} - module.exit_json(ansible_facts=data) - except TypeError: - module.fail_json(msg="lldpctl command failed. 
is lldpd running?") - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/net_infrastructure/netscaler b/library/net_infrastructure/netscaler deleted file mode 100644 index de3c8fc2421..00000000000 --- a/library/net_infrastructure/netscaler +++ /dev/null @@ -1,190 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to manage Citrix NetScaler entities -(c) 2013, Nandor Sivok - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' --- -module: netscaler -version_added: "1.1" -short_description: Manages Citrix NetScaler entities -description: - - Manages Citrix NetScaler server and service entities. -options: - nsc_host: - description: - - hostname or ip of your netscaler - required: true - default: null - aliases: [] - nsc_protocol: - description: - - protocol used to access netscaler - required: false - default: https - aliases: [] - user: - description: - - username - required: true - default: null - aliases: [] - password: - description: - - password - required: true - default: null - aliases: [] - action: - description: - - the action you want to perform on the entity - required: false - default: enable - choices: ["enable", "disable"] - aliases: [] - name: - description: - - name of the entity - required: true - default: hostname - aliases: [] - type: - description: - - type of the entity - required: false - default: server - choices: ["server", "service"] - aliases: [] - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- required: false - default: 'yes' - choices: ['yes', 'no'] - -requirements: [ "urllib", "urllib2" ] -author: Nandor Sivok -''' - -EXAMPLES = ''' -# Disable the server -ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass action=disable" - -# Enable the server -ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass action=enable" - -# Disable the service local:8080 -ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass name=local:8080 type=service action=disable" -''' - - -import json -import base64 -import socket - - -class netscaler(object): - - _nitro_base_url = '/nitro/v1/' - - def __init__(self, module): - self.module = module - - def http_request(self, api_endpoint, data_json={}): - request_url = self._nsc_protocol + '://' + self._nsc_host + self._nitro_base_url + api_endpoint - - data_json = urllib.urlencode(data_json) - if not len(data_json): - data_json = None - - auth = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip() - headers = { - 'Authorization': 'Basic %s' % auth, - 'Content-Type' : 'application/x-www-form-urlencoded', - } - - response, info = fetch_url(self.module, request_url, data=data_json, headers=headers) - - return json.load(response) - - def prepare_request(self, action): - resp = self.http_request( - 'config', - { - "object": - { - "params": {"action": action}, - self._type: {"name": self._name} - } - } - ) - - return resp - - -def core(module): - n = netscaler(module) - n._nsc_host = module.params.get('nsc_host') - n._nsc_user = module.params.get('user') - n._nsc_pass = module.params.get('password') - n._nsc_protocol = module.params.get('nsc_protocol') - n._name = module.params.get('name') - n._type = module.params.get('type') - action = module.params.get('action') - - r = n.prepare_request(action) - - return r['errorcode'], r - - -def main(): - - module = AnsibleModule( - argument_spec = dict( - nsc_host = dict(required=True), - nsc_protocol = dict(default='https'), - user = dict(required=True), - password = dict(required=True), - action = dict(default='enable', choices=['enable','disable']), - name = dict(default=socket.gethostname()), - type = dict(default='server', choices=['service', 'server']), - validate_certs=dict(default='yes', type='bool'), - ) - ) - - rc = 0 - try: - rc, result = core(module) - except Exception, e: - module.fail_json(msg=str(e)) - - if rc != 0: - module.fail_json(rc=rc, msg=result) - else: - result['changed'] = True - module.exit_json(**result) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() diff --git a/library/net_infrastructure/openvswitch_bridge b/library/net_infrastructure/openvswitch_bridge deleted file mode 100644 index 551ca707a2d..00000000000 --- a/library/net_infrastructure/openvswitch_bridge +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, David Stygstra -# -# This file is part of Ansible -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -DOCUMENTATION = ''' ---- -module: openvswitch_bridge -version_added: 1.4 -author: David Stygstra -short_description: Manage Open vSwitch bridges -requirements: [ ovs-vsctl ] -description: - - Manage Open vSwitch bridges -options: - bridge: - required: true - description: - - Name of bridge to manage - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the bridge should exist - timeout: - required: false - default: 5 - description: - - How long to wait for ovs-vswitchd to respond -''' - -EXAMPLES = ''' -# Create a bridge named br-int -- openvswitch_bridge: bridge=br-int state=present -''' - - -class OVSBridge(object): - def __init__(self, module): - self.module = module - self.bridge = module.params['bridge'] - self.state = module.params['state'] - self.timeout = module.params['timeout'] - - def _vsctl(self, command): - '''Run ovs-vsctl command''' - return self.module.run_command(['ovs-vsctl', '-t', str(self.timeout)] + command) - - def exists(self): - '''Check if the bridge already exists''' - rc, _, err = self._vsctl(['br-exists', self.bridge]) - if rc == 0: # See ovs-vsctl(8) for status codes - return True - if rc == 2: - return False - raise Exception(err) - - def add(self): - '''Create the bridge''' - rc, _, err = self._vsctl(['add-br', self.bridge]) - if rc != 0: - raise Exception(err) - - def delete(self): - '''Delete the bridge''' - rc, _, err = self._vsctl(['del-br', self.bridge]) - if rc != 0: - raise Exception(err) - - def check(self): - '''Run check mode''' - try: - if self.state == 'absent' and self.exists(): - changed = True - elif self.state == 'present' and not self.exists(): - changed = True - else: - changed = False - except Exception, e: - self.module.fail_json(msg=str(e)) - self.module.exit_json(changed=changed) - - def run(self): - '''Make the necessary changes''' - changed = False - try: - if self.state == 'absent': - if self.exists(): - self.delete() - changed = True - elif self.state == 'present': - if not self.exists(): - self.add() - changed = True - except Exception, e: - self.module.fail_json(msg=str(e)) - self.module.exit_json(changed=changed) - - -def main(): - module = AnsibleModule( - argument_spec={ - 'bridge': {'required': True}, - 'state': {'default': 'present', 'choices': ['present', 'absent']}, - 'timeout': {'default': 5, 'type': 'int'} - }, - supports_check_mode=True, - ) - - br = OVSBridge(module) - if module.check_mode: - br.check() - else: - br.run() - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/net_infrastructure/openvswitch_port b/library/net_infrastructure/openvswitch_port deleted file mode 100644 index 66391937d1b..00000000000 --- a/library/net_infrastructure/openvswitch_port +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, David Stygstra -# -# This file is part of Ansible -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -DOCUMENTATION = ''' ---- -module: openvswitch_port -version_added: 1.4 -author: David Stygstra -short_description: Manage Open vSwitch ports -requirements: [ ovs-vsctl ] -description: - - Manage Open vSwitch ports -options: - bridge: - required: true - description: - - Name of bridge to manage - port: - required: true - description: - - Name of port to manage on the bridge - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the port should exist - timeout: - required: false - default: 5 - description: - - How long to wait for ovs-vswitchd to respond -''' - -EXAMPLES = ''' -# Creates port eth2 on bridge br-ex -- openvswitch_port: bridge=br-ex port=eth2 state=present -''' - - -class OVSPort(object): - def __init__(self, module): - self.module = module - self.bridge = module.params['bridge'] - self.port = module.params['port'] - self.state = module.params['state'] - self.timeout = module.params['timeout'] - - def _vsctl(self, command): - '''Run ovs-vsctl command''' - return self.module.run_command(['ovs-vsctl', '-t', str(self.timeout)] + command) - - def exists(self): - '''Check if the port already exists''' - rc, out, err = self._vsctl(['list-ports', self.bridge]) - if rc != 0: - raise Exception(err) - return any(port.rstrip() == self.port for port in out.split('\n')) - - def add(self): - '''Add the port''' - rc, _, err = self._vsctl(['add-port', self.bridge, self.port]) - if rc != 0: - raise Exception(err) - - def delete(self): - '''Remove the port''' - rc, _, err = self._vsctl(['del-port', self.bridge, self.port]) - if rc != 0: - raise Exception(err) - - def check(self): - '''Run check mode''' - try: - if self.state == 'absent' and self.exists(): - changed = True - elif self.state == 'present' and not self.exists(): - changed = True - else: - changed = False - except Exception, e: - self.module.fail_json(msg=str(e)) - self.module.exit_json(changed=changed) - - def run(self): - '''Make the necessary changes''' - changed = False - try: - if self.state == 'absent': - if self.exists(): - self.delete() - changed = True - elif self.state == 'present': - if not self.exists(): - self.add() - changed = True - except Exception, e: - self.module.fail_json(msg=str(e)) - self.module.exit_json(changed=changed) - - -def main(): - module = AnsibleModule( - argument_spec={ - 'bridge': {'required': True}, - 'port': {'required': True}, - 'state': {'default': 'present', 'choices': ['present', 'absent']}, - 'timeout': {'default': 5, 'type': 'int'} - }, - supports_check_mode=True, - ) - - port = OVSPort(module) - if module.check_mode: - port.check() - else: - port.run() - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/network/get_url b/library/network/get_url deleted file mode 100644 index c3b81129a27..00000000000 --- a/library/network/get_url +++ /dev/null @@ -1,313 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Jan-Piet Mens -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -# see examples/playbooks/get_url.yml - -import shutil -import datetime -import re -import tempfile - -DOCUMENTATION = ''' ---- -module: get_url -short_description: Downloads files from HTTP, HTTPS, or FTP to node -description: - - Downloads files from HTTP, HTTPS, or FTP to the remote server. The remote - server I(must) have direct access to the remote resource. - - By default, if an environment variable C(_proxy) is set on - the target host, requests will be sent through that proxy. This - behaviour can be overridden by setting a variable for this task - (see `setting the environment - `_), - or by using the use_proxy option. -version_added: "0.6" -options: - url: - description: - - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path - required: true - default: null - aliases: [] - dest: - description: - - absolute path of where to download the file to. - - If C(dest) is a directory, either the server provided filename or, if - none provided, the base name of the URL on the remote server will be - used. If a directory, C(force) has no effect. - If C(dest) is a directory, the file will always be - downloaded (regardless of the force option), but replaced only if the contents changed. - required: true - default: null - force: - description: - - If C(yes) and C(dest) is not a directory, will download the file every - time and replace the file if the contents change. If C(no), the file - will only be downloaded if the destination does not exist. Generally - should be C(yes) only for small local files. Prior to 0.6, this module - behaved as if C(yes) was the default. - version_added: "0.7" - required: false - choices: [ "yes", "no" ] - default: "no" - aliases: [ "thirsty" ] - sha256sum: - description: - - If a SHA-256 checksum is passed to this parameter, the digest of the - destination file will be calculated after it is downloaded to ensure - its integrity and verify that the transfer completed successfully. - version_added: "1.3" - required: false - default: null - use_proxy: - description: - - if C(no), it will not use a proxy, even if one is defined in - an environment variable on the target hosts. - required: false - default: 'yes' - choices: ['yes', 'no'] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - timeout: - description: - - Timeout for URL request - required: false - default: 10 - version_added: '1.8' - url_username: - description: - - The username for use in HTTP basic authentication. This parameter can be used - without C(url_password) for sites that allow empty passwords. - required: false - version_added: '1.6' - url_password: - description: - - The password for use in HTTP basic authentication. If the C(url_username) - parameter is not specified, the C(url_password) parameter will not be used. 
- required: false - version_added: '1.6' - others: - description: - - all arguments accepted by the M(file) module also work here - required: false -notes: - - This module doesn't yet support configuration for proxies. -# informational: requirements for nodes -requirements: [ urllib2, urlparse ] -author: Jan-Piet Mens -''' - -EXAMPLES=''' -- name: download foo.conf - get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf mode=0440 - -- name: download file with sha256 check - get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf sha256sum=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c -''' - -try: - import hashlib - HAS_HASHLIB=True -except ImportError: - HAS_HASHLIB=False - -# ============================================================== -# url handling - -def url_filename(url): - fn = os.path.basename(urlparse.urlsplit(url)[2]) - if fn == '': - return 'index.html' - return fn - -def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10): - """ - Download data from the url and store in a temporary file. - - Return (tempfile, info about the request) - """ - - rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout) - - if info['status'] == 304: - module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', '')) - - # create a temporary file and copy content to do md5-based replacement - if info['status'] != 200: - module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest) - - fd, tempname = tempfile.mkstemp() - f = os.fdopen(fd, 'wb') - try: - shutil.copyfileobj(rsp, f) - except Exception, err: - os.remove(tempname) - module.fail_json(msg="failed to create temporary content file: %s" % str(err)) - f.close() - rsp.close() - return tempname, info - -def extract_filename_from_headers(headers): - """ - Extracts a filename from the given dict of HTTP headers. - - Looks for the content-disposition header and applies a regex. - Returns the filename if successful, else None.""" - cont_disp_regex = 'attachment; ?filename="?([^"]+)' - res = None - - if 'content-disposition' in headers: - cont_disp = headers['content-disposition'] - match = re.match(cont_disp_regex, cont_disp) - if match: - res = match.group(1) - # Try preventing any funny business. - res = os.path.basename(res) - - return res - -# ============================================================== -# main - -def main(): - - argument_spec = url_argument_spec() - argument_spec.update( - url = dict(required=True), - dest = dict(required=True), - sha256sum = dict(default=''), - timeout = dict(required=False, type='int', default=10), - ) - - module = AnsibleModule( - # not checking because of daisy chain to file module - argument_spec = argument_spec, - add_file_common_args=True - ) - - url = module.params['url'] - dest = os.path.expanduser(module.params['dest']) - force = module.params['force'] - sha256sum = module.params['sha256sum'] - use_proxy = module.params['use_proxy'] - timeout = module.params['timeout'] - - dest_is_dir = os.path.isdir(dest) - last_mod_time = None - - if not dest_is_dir and os.path.exists(dest): - if not force: - module.exit_json(msg="file already exists", dest=dest, url=url, changed=False) - - # If the file already exists, prepare the last modified time for the - # request. 
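    # fetch_url (module_utils.urls) turns last_mod_time into an
    # If-Modified-Since request header; a 304 Not Modified reply then makes
    # url_get() above exit with changed=False.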
- mtime = os.path.getmtime(dest) - last_mod_time = datetime.datetime.utcfromtimestamp(mtime) - - # download to tmpsrc - tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout) - - # Now the request has completed, we can finally generate the final - # destination file name from the info dict. - - if dest_is_dir: - filename = extract_filename_from_headers(info) - if not filename: - # Fall back to extracting the filename from the URL. - # Pluck the URL from the info, since a redirect could have changed - # it. - filename = url_filename(info['url']) - dest = os.path.join(dest, filename) - - md5sum_src = None - md5sum_dest = None - - # raise an error if there is no tmpsrc file - if not os.path.exists(tmpsrc): - os.remove(tmpsrc) - module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg']) - if not os.access(tmpsrc, os.R_OK): - os.remove(tmpsrc) - module.fail_json( msg="Source %s not readable" % (tmpsrc)) - md5sum_src = module.md5(tmpsrc) - - # check if there is no dest file - if os.path.exists(dest): - # raise an error if copy has no permission on dest - if not os.access(dest, os.W_OK): - os.remove(tmpsrc) - module.fail_json( msg="Destination %s not writable" % (dest)) - if not os.access(dest, os.R_OK): - os.remove(tmpsrc) - module.fail_json( msg="Destination %s not readable" % (dest)) - md5sum_dest = module.md5(dest) - else: - if not os.access(os.path.dirname(dest), os.W_OK): - os.remove(tmpsrc) - module.fail_json( msg="Destination %s not writable" % (os.path.dirname(dest))) - - if md5sum_src != md5sum_dest: - try: - shutil.copyfile(tmpsrc, dest) - except Exception, err: - os.remove(tmpsrc) - module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err))) - changed = True - else: - changed = False - - # Check the digest of the destination file and ensure that it matches the - # sha256sum parameter if it is present - if sha256sum != '': - # Remove any non-alphanumeric characters, including the infamous - # Unicode zero-width space - stripped_sha256sum = re.sub(r'\W+', '', sha256sum) - - if not HAS_HASHLIB: - os.remove(dest) - module.fail_json(msg="The sha256sum parameter requires hashlib, which is available in Python 2.5 and higher") - else: - destination_checksum = module.sha256(dest) - - if stripped_sha256sum.lower() != destination_checksum: - os.remove(dest) - module.fail_json(msg="The SHA-256 checksum for %s did not match %s; it was %s." % (dest, sha256sum, destination_checksum)) - - os.remove(tmpsrc) - - # allow file attribute changes - module.params['path'] = dest - file_args = module.load_file_common_arguments(module.params) - file_args['path'] = dest - changed = module.set_fs_attributes_if_different(file_args, changed) - - # Mission complete - module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum_src, - sha256sum=sha256sum, changed=changed, msg=info.get('msg', '')) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() diff --git a/library/network/slurp b/library/network/slurp deleted file mode 100644 index 744032c2cd6..00000000000 --- a/library/network/slurp +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' --- -module: slurp -version_added: historical -short_description: Slurps a file from remote nodes -description: - - This module works like M(fetch). It is used for fetching a base64-encoded blob - containing the data in a remote file. -options: - src: - description: - - The file on the remote system to fetch. This I(must) be a file, not a - directory. - required: true - default: null - aliases: [] -notes: - - "See also: M(fetch)" -requirements: [] -author: Michael DeHaan -''' - -EXAMPLES = ''' -ansible host -m slurp -a 'src=/tmp/xx' - host | success >> { - "content": "aGVsbG8gQW5zaWJsZSB3b3JsZAo=", - "encoding": "base64" - } -''' - -import base64 - -def main(): - module = AnsibleModule( - argument_spec = dict( - src = dict(required=True, aliases=['path']), - ), - supports_check_mode=True - ) - source = module.params['src'] - - if not os.path.exists(source): - module.fail_json(msg="file not found: %s" % source) - if not os.access(source, os.R_OK): - module.fail_json(msg="file is not readable: %s" % source) - - data = base64.b64encode(file(source).read()) - - module.exit_json(content=data, encoding='base64') - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/network/uri b/library/network/uri deleted file mode 100644 index 8d62463df72..00000000000 --- a/library/network/uri +++ /dev/null @@ -1,445 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Romeo Theriault - -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -# see examples/playbooks/uri.yml - -import shutil -import tempfile -import base64 -import datetime -try: - import json -except ImportError: - import simplejson as json - -DOCUMENTATION = ''' --- -module: uri -short_description: Interacts with web services -description: - - Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE - HTTP authentication mechanisms. -version_added: "1.1" -options: - url: - description: - - HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path - required: true - default: null - aliases: [] - dest: - description: - - path of where to download the file to (if desired). If I(dest) is a directory, the basename of the file on the remote server will be used. - required: false - default: null - user: - description: - - username for the module to use for Digest, Basic or WSSE authentication. - required: false - default: null - password: - description: - - password for the module to use for Digest, Basic or WSSE authentication. 
- required: false - default: null - body: - description: - - The body of the http request/response to the web service. - required: false - default: null - method: - description: - - The HTTP method of the request or response. - required: false - choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH" ] - default: "GET" - return_content: - description: - - Whether or not to return the body of the request as a "content" key in the dictionary result. If the reported Content-type is "application/json", then the JSON is additionally loaded into a key called C(json) in the dictionary results. - required: false - choices: [ "yes", "no" ] - default: "no" - force_basic_auth: - description: - - httplib2, the library used by the uri module, only sends authentication information when a webservice - responds to an initial request with a 401 status. Since some basic auth services do not properly - send a 401, logins will fail. This option forces the sending of the Basic authentication header - upon initial request. - required: false - choices: [ "yes", "no" ] - default: "no" - follow_redirects: - description: - - Whether or not the URI module should follow redirects. C(all) will follow all redirects. - C(safe) will follow only "safe" redirects, where "safe" means that the client is only - doing a GET or HEAD on the URI to which it is being redirected. C(none) will not follow - any redirects. Note that C(yes) and C(no) choices are accepted for backwards compatibility, - where C(yes) is the equivalent of C(all) and C(no) is the equivalent of C(safe). C(yes) and C(no) - are deprecated and will be removed in some future version of Ansible. - required: false - choices: [ "all", "safe", "none" ] - default: "safe" - creates: - description: - - a filename; when it already exists, this step will not be run. - required: false - removes: - description: - - a filename; when it does not exist, this step will not be run. - required: false - status_code: - description: - - A valid, numeric, HTTP status code that signifies success of the request. Can also be a comma-separated list of status codes. - required: false - default: 200 - timeout: - description: - - The socket level timeout in seconds - required: false - default: 30 - HEADER_: - description: - - Any parameter starting with "HEADER_" is sent with your request as a header. - For example, HEADER_Content-Type="application/json" would send the header - "Content-Type" along with your request with a value of "application/json". - required: false - default: null - others: - description: - - all arguments accepted by the M(file) module also work here - required: false - -# informational: requirements for nodes -requirements: [ urlparse, httplib2 ] -author: Romeo Theriault -''' - -EXAMPLES = ''' -# Check that you can connect (GET) to a page and it returns a status 200 -- uri: url=http://www.example.com - -# Check that a page returns a status 200 and fail if the word AWESOME is not in the page contents. 
-- action: uri url=http://www.example.com return_content=yes - register: webpage - -- action: fail - when: 'AWESOME' not in "{{ webpage.content }}" - - -# Create a JIRA issue - -- uri: url=https://your.jira.example.com/rest/api/2/issue/ - method=POST user=your_username password=your_pass - body="{{ lookup('file','issue.json') }}" force_basic_auth=yes - status_code=201 HEADER_Content-Type="application/json" - -# Login to a form based webpage, then use the returned cookie to -# access the app in later tasks - -- uri: url=https://your.form.based.auth.example.com/index.php - method=POST body="name=your_username&password=your_password&enter=Sign%20in" - status_code=302 HEADER_Content-Type="application/x-www-form-urlencoded" - register: login - -- uri: url=https://your.form.based.auth.example.com/dashboard.php - method=GET return_content=yes HEADER_Cookie="{{login.set_cookie}}" - -# Queue build of a project in Jenkins: - -- uri: url=http://{{jenkins.host}}/job/{{jenkins.job}}/build?token={{jenkins.token}} - method=GET user={{jenkins.user}} password={{jenkins.password}} force_basic_auth=yes status_code=201 - -''' - -HAS_HTTPLIB2 = True -try: - import httplib2 -except ImportError: - HAS_HTTPLIB2 = False - -HAS_URLPARSE = True - -try: - import urlparse - import socket -except ImportError: - HAS_URLPARSE = False - - -def write_file(module, url, dest, content): - # create a tempfile with some test content - fd, tmpsrc = tempfile.mkstemp() - f = open(tmpsrc, 'wb') - try: - f.write(content) - except Exception, err: - os.remove(tmpsrc) - module.fail_json(msg="failed to create temporary content file: %s" % str(err)) - f.close() - - md5sum_src = None - md5sum_dest = None - - # raise an error if there is no tmpsrc file - if not os.path.exists(tmpsrc): - os.remove(tmpsrc) - module.fail_json(msg="Source %s does not exist" % (tmpsrc)) - if not os.access(tmpsrc, os.R_OK): - os.remove(tmpsrc) - module.fail_json( msg="Source %s not readable" % (tmpsrc)) - md5sum_src = module.md5(tmpsrc) - - # check if there is no dest file - if os.path.exists(dest): - # raise an error if copy has no permission on dest - if not os.access(dest, os.W_OK): - os.remove(tmpsrc) - module.fail_json( msg="Destination %s not writable" % (dest)) - if not os.access(dest, os.R_OK): - os.remove(tmpsrc) - module.fail_json( msg="Destination %s not readable" % (dest)) - md5sum_dest = module.md5(dest) - else: - if not os.access(os.path.dirname(dest), os.W_OK): - os.remove(tmpsrc) - module.fail_json( msg="Destination dir %s not writable" % (os.path.dirname(dest))) - - if md5sum_src != md5sum_dest: - try: - shutil.copyfile(tmpsrc, dest) - except Exception, err: - os.remove(tmpsrc) - module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err))) - - os.remove(tmpsrc) - - -def url_filename(url): - fn = os.path.basename(urlparse.urlsplit(url)[2]) - if fn == '': - return 'index.html' - return fn - - -def uri(module, url, dest, user, password, body, method, headers, redirects, socket_timeout): - # To debug - #httplib2.debug = 4 - - # Handle Redirects - if redirects == "all" or redirects == "yes": - follow_redirects = True - follow_all_redirects = True - elif redirects == "none": - follow_redirects = False - follow_all_redirects = False - else: - follow_redirects = True - follow_all_redirects = False - - # Create a Http object and set some default options. 
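    # NOTE: certificate validation is disabled unconditionally on the next
    # line; this version of the uri module has no validate_certs option, so
    # HTTPS peers are not verified.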
- h = httplib2.Http(disable_ssl_certificate_validation=True, timeout=socket_timeout) - h.follow_all_redirects = follow_all_redirects - h.follow_redirects = follow_redirects - h.forward_authorization_headers = True - - # If they have a username or password verify they have both, then add them to the request - if user is not None and password is None: - module.fail_json(msg="Both a username and password need to be set.") - if password is not None and user is None: - module.fail_json(msg="Both a username and password need to be set.") - if user is not None and password is not None: - h.add_credentials(user, password) - - # if dest is set and is a directory, let's check if we get redirected and - # set the filename from that url - redirected = False - resp_redir = {} - r = {} - if dest is not None: - dest = os.path.expanduser(dest) - if os.path.isdir(dest): - # first check if we are redirected to a file download - h.follow_redirects=False - # Try the request - try: - resp_redir, content_redir = h.request(url, method=method, body=body, headers=headers) - # if we are redirected, update the url with the location header, - # and update dest with the new url filename - except: - pass - if 'status' in resp_redir and resp_redir['status'] in ["301", "302", "303", "307"]: - url = resp_redir['location'] - redirected = True - dest = os.path.join(dest, url_filename(url)) - # if destination file already exists, only download if file newer - if os.path.exists(dest): - t = datetime.datetime.utcfromtimestamp(os.path.getmtime(dest)) - tstamp = t.strftime('%a, %d %b %Y %H:%M:%S +0000') - headers['If-Modified-Since'] = tstamp - - # do safe redirects now, including 307 - h.follow_redirects=follow_redirects - - # Make the request, or try to :) - try: - resp, content = h.request(url, method=method, body=body, headers=headers) - r['redirected'] = redirected - r.update(resp_redir) - r.update(resp) - try: - return r, unicode(content.decode('unicode_escape')), dest - except: - return r, content, dest - except httplib2.RedirectMissingLocation: - module.fail_json(msg="A 3xx redirect response code was provided but no Location: header was provided to point to the new location.") - except httplib2.RedirectLimit: - module.fail_json(msg="The maximum number of redirections was reached without coming to a final URI.") - except httplib2.ServerNotFoundError: - module.fail_json(msg="Unable to resolve the host name given.") - except httplib2.RelativeURIError: - module.fail_json(msg="A relative, as opposed to an absolute URI, was passed in.") - except httplib2.FailedToDecompressContent: - module.fail_json(msg="The headers claimed that the content of the response was compressed but the decompression algorithm applied to the content failed.") - except httplib2.UnimplementedDigestAuthOptionError: - module.fail_json(msg="The server requested a type of Digest authentication that we are unfamiliar with.") - except httplib2.UnimplementedHmacDigestAuthOptionError: - module.fail_json(msg="The server requested a type of HMACDigest authentication that we are unfamiliar with.") - except socket.error, e: - module.fail_json(msg="Socket error: %s to %s" % (e, url)) - -def main(): - - module = AnsibleModule( - argument_spec = dict( - url = dict(required=True), - dest = dict(required=False, default=None), - user = dict(required=False, default=None), - password = dict(required=False, 
default=None), - body = dict(required=False, default=None), - method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH']), - return_content = dict(required=False, default='no', type='bool'), - force_basic_auth = dict(required=False, default='no', type='bool'), - follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']), - creates = dict(required=False, default=None), - removes = dict(required=False, default=None), - status_code = dict(required=False, default=[200], type='list'), - timeout = dict(required=False, default=30, type='int'), - ), - check_invalid_arguments=False, - add_file_common_args=True - ) - - if not HAS_HTTPLIB2: - module.fail_json(msg="httplib2 is not installed") - if not HAS_URLPARSE: - module.fail_json(msg="urlparse is not installed") - - url = module.params['url'] - user = module.params['user'] - password = module.params['password'] - body = module.params['body'] - method = module.params['method'] - dest = module.params['dest'] - return_content = module.params['return_content'] - force_basic_auth = module.params['force_basic_auth'] - redirects = module.params['follow_redirects'] - creates = module.params['creates'] - removes = module.params['removes'] - status_code = [int(x) for x in list(module.params['status_code'])] - socket_timeout = module.params['timeout'] - - # Grab all the http headers. Need this hack since passing multi-values is currently a bit ugly. (e.g. headers='{"Content-Type":"application/json"}') - dict_headers = {} - for key, value in module.params.iteritems(): - if key.startswith("HEADER_"): - skey = key.replace("HEADER_", "") - dict_headers[skey] = value - - - if creates is not None: - # do not run the command if the line contains creates=filename - # and the filename already exists. This allows idempotence - # of uri executions. - creates = os.path.expanduser(creates) - if os.path.exists(creates): - module.exit_json(stdout="skipped, since %s exists" % creates, skipped=True, changed=False, stderr=False, rc=0) - - if removes is not None: - # do not run the command if the line contains removes=filename - # and the filename does not exist. This allows idempotence - # of uri executions. - removes = os.path.expanduser(removes) - if not os.path.exists(removes): - module.exit_json(stdout="skipped, since %s does not exist" % removes, skipped=True, changed=False, stderr=False, rc=0) - - - # httplib2 only sends authentication after the server asks for it with a 401. - # Some 'basic auth' services fail to send a 401 and require the authentication - # up front. This creates the Basic authentication header and sends it immediately. - if force_basic_auth: - dict_headers["Authorization"] = "Basic {0}".format(base64.b64encode("{0}:{1}".format(user, password))) - - - # Make the request - resp, content, dest = uri(module, url, dest, user, password, body, method, dict_headers, redirects, socket_timeout) - resp['status'] = int(resp['status']) - - # Write the file out if requested - if dest is not None: - if resp['status'] == 304: - changed = False - else: - write_file(module, url, dest, content) - # allow file attribute changes - changed = True - module.params['path'] = dest - file_args = module.load_file_common_arguments(module.params) - file_args['path'] = dest - changed = module.set_fs_attributes_if_different(file_args, changed) - resp['path'] = dest - else: - changed = False - - # Transmogrify the headers, replacing '-' with '_', since variables don't work with dashes. 
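    # e.g. the response's Content-Type header becomes content_type in the
    # result, which playbooks can reference as registered_var.content_type.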
- uresp = {} - for key, value in resp.iteritems(): - ukey = key.replace("-", "_") - uresp[ukey] = value - - if 'content_type' in uresp: - if uresp['content_type'].startswith('application/json'): - try: - js = json.loads(content) - uresp['json'] = js - except: - pass - if resp['status'] not in status_code: - module.fail_json(msg="Status code was not " + str(status_code), content=content, **uresp) - elif return_content: - module.exit_json(changed=changed, content=content, **uresp) - else: - module.exit_json(changed=changed, **uresp) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/campfire b/library/notification/campfire deleted file mode 100644 index 31e69fc5459..00000000000 --- a/library/notification/campfire +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -DOCUMENTATION = ''' --- -module: campfire -version_added: "1.2" -short_description: Send a message to Campfire -description: - - Send a message to Campfire. - - Messages with newlines will result in a "Paste" message being sent. -options: - subscription: - description: - - The subscription name to use. - required: true - token: - description: - - API token. - required: true - room: - description: - - Room number to which the message should be sent. - required: true - msg: - description: - - The message body. - required: true - notify: - description: - - Send a notification sound before the message. - required: false - choices: ["56k", "bell", "bezos", "bueller", "clowntown", - "cottoneyejoe", "crickets", "dadgummit", "dangerzone", - "danielsan", "deeper", "drama", "greatjob", "greyjoy", - "guarantee", "heygirl", "horn", "horror", - "inconceivable", "live", "loggins", "makeitso", "noooo", - "nyan", "ohmy", "ohyeah", "pushit", "rimshot", - "rollout", "rumble", "sax", "secret", "sexyback", - "story", "tada", "tmyk", "trololo", "trombone", "unix", - "vuvuzela", "what", "whoomp", "yeah", "yodel"] - -# informational: requirements for nodes -requirements: [ urllib2, cgi ] -author: Adam Garside -''' - -EXAMPLES = ''' -- campfire: subscription=foo token=12345 room=123 msg="Task completed." - -- campfire: subscription=foo token=12345 room=123 notify=loggins - msg="Task completed ... with feeling." 
-''' - - -def main(): - - try: - import urllib2 - HAS_URLLIB2 = True - except ImportError: - HAS_URLLIB2 = False - - try: - import cgi - HAS_CGI = True - except ImportError: - HAS_CGI = False - - module = AnsibleModule( - argument_spec=dict( - subscription=dict(required=True), - token=dict(required=True), - room=dict(required=True), - msg=dict(required=True), - notify=dict(required=False, - choices=["56k", "bell", "bezos", "bueller", - "clowntown", "cottoneyejoe", - "crickets", "dadgummit", "dangerzone", - "danielsan", "deeper", "drama", - "greatjob", "greyjoy", "guarantee", - "heygirl", "horn", "horror", - "inconceivable", "live", "loggins", - "makeitso", "noooo", "nyan", "ohmy", - "ohyeah", "pushit", "rimshot", - "rollout", "rumble", "sax", "secret", - "sexyback", "story", "tada", "tmyk", - "trololo", "trombone", "unix", - "vuvuzela", "what", "whoomp", "yeah", - "yodel"]), - ), - supports_check_mode=False - ) - - # fail on missing imports only after the module object exists to report with - if not HAS_URLLIB2: - module.fail_json(msg="urllib2 is required") - if not HAS_CGI: - module.fail_json(msg="cgi is required") - - subscription = module.params["subscription"] - token = module.params["token"] - room = module.params["room"] - msg = module.params["msg"] - notify = module.params["notify"] - - URI = "https://%s.campfirenow.com" % subscription - NSTR = "<message><type>SoundMessage</type><body>%s</body></message>" - MSTR = "<message><body>%s</body></message>" - AGENT = "Ansible/1.2" - - try: - - # Setup basic auth using token as the username - pm = urllib2.HTTPPasswordMgrWithDefaultRealm() - pm.add_password(None, URI, token, 'X') - - # Setup Handler and define the opener for the request - handler = urllib2.HTTPBasicAuthHandler(pm) - opener = urllib2.build_opener(handler) - - target_url = '%s/room/%s/speak.xml' % (URI, room) - - # Send some audible notification if requested - if notify: - req = urllib2.Request(target_url, NSTR % cgi.escape(notify)) - req.add_header('Content-Type', 'application/xml') - req.add_header('User-agent', AGENT) - response = opener.open(req) - - # Send the message - req = urllib2.Request(target_url, MSTR % cgi.escape(msg)) - req.add_header('Content-Type', 'application/xml') - req.add_header('User-agent', AGENT) - response = opener.open(req) - - except urllib2.HTTPError, e: - if not (200 <= e.code < 300): - module.fail_json(msg="unable to send msg: '%s', campfire api" - " returned error code: '%s'" % - (msg, e.code)) - - except Exception, e: - module.fail_json(msg="unable to send msg: %s" % msg) - - module.exit_json(changed=True, room=room, msg=msg, notify=notify) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/flowdock b/library/notification/flowdock deleted file mode 100644 index 009487fb438..00000000000 --- a/library/notification/flowdock +++ /dev/null @@ -1,192 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Matt Coddington -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
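Stepping back to the campfire module removed just above: it authenticates with HTTP basic auth, passing the API token as the username and a throwaway password ('X'), then posts small XML payloads. A minimal sketch of that flow without the opener machinery (helper name hypothetical; Python 2 idioms kept to match the era of this code):

    import base64
    import urllib2

    def campfire_speak(subscription, token, room, xml_body):
        # Same auth scheme as the module's HTTPPasswordMgr setup: the token
        # is the basic-auth username, 'X' a dummy password.
        url = 'https://%s.campfirenow.com/room/%s/speak.xml' % (subscription, room)
        req = urllib2.Request(url, xml_body)
        req.add_header('Authorization', 'Basic %s' % base64.b64encode('%s:X' % token))
        req.add_header('Content-Type', 'application/xml')
        return urllib2.urlopen(req)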
- -DOCUMENTATION = ''' ---- -module: flowdock -version_added: "1.2" -author: Matt Coddington -short_description: Send a message to a flowdock -description: - - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat) -options: - token: - description: - - API token. - required: true - type: - description: - - Whether to post to 'inbox' or 'chat' - required: true - choices: [ "inbox", "chat" ] - msg: - description: - - Content of the message - required: true - tags: - description: - - tags of the message, separated by commas - required: false - external_user_name: - description: - - (chat only - required) Name of the "user" sending the message - required: false - from_address: - description: - - (inbox only - required) Email address of the message sender - required: false - source: - description: - - (inbox only - required) Human readable identifier of the application that uses the Flowdock API - required: false - subject: - description: - - (inbox only - required) Subject line of the message - required: false - from_name: - description: - - (inbox only) Name of the message sender - required: false - reply_to: - description: - - (inbox only) Email address for replies - required: false - project: - description: - - (inbox only) Human readable identifier for more detailed message categorization - required: false - link: - description: - - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox. - required: false - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 - -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] -''' - -EXAMPLES = ''' -- flowdock: type=inbox - token=AAAAAA - from_address=user@example.com - source='my cool app' - msg='test from ansible' - subject='test subject' - -- flowdock: type=chat - token=AAAAAA - external_user_name=testuser - msg='test from ansible' - tags=tag1,tag2,tag3 -''' - -# =========================================== -# Module execution. 
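For the validation logic in main() below: Flowdock's push API has two endpoints with different required fields, and the fail_json() branching enforces exactly that split. A sketch of the two payload shapes (the token, addresses, and names are placeholders):

    import urllib

    # 'inbox' messages require a sender address, a source and a subject
    inbox_params = urllib.urlencode({
        'content': 'build finished',
        'from_address': 'ci@example.com',
        'source': 'my cool app',
        'subject': 'build status',
    })

    # 'chat' messages instead require the display name of the pseudo-user
    chat_params = urllib.urlencode({
        'content': 'build finished',
        'external_user_name': 'ci-bot',
    })

    # either payload is POSTed to https://api.flowdock.com/v1/messages/<type>/<token>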
-#
-
-def main():
-
-    module = AnsibleModule(
-        argument_spec=dict(
-            token=dict(required=True),
-            msg=dict(required=True),
-            type=dict(required=True, choices=["inbox","chat"]),
-            external_user_name=dict(required=False),
-            from_address=dict(required=False),
-            source=dict(required=False),
-            subject=dict(required=False),
-            from_name=dict(required=False),
-            reply_to=dict(required=False),
-            project=dict(required=False),
-            tags=dict(required=False),
-            link=dict(required=False),
-            validate_certs = dict(default='yes', type='bool'),
-        ),
-        supports_check_mode=True
-    )
-
-    type = module.params["type"]
-    token = module.params["token"]
-    if type == 'inbox':
-        url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
-    else:
-        url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
-
-    params = {}
-
-    # required params
-    params['content'] = module.params["msg"]
-
-    # required params for the 'chat' type
-    if module.params['external_user_name']:
-        if type == 'inbox':
-            module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
-        else:
-            params['external_user_name'] = module.params["external_user_name"]
-    elif type == 'chat':
-        module.fail_json(msg="external_user_name is required for the 'chat' type")
-
-    # required params for the 'inbox' type
-    for item in [ 'from_address', 'source', 'subject' ]:
-        if module.params[item]:
-            if type == 'chat':
-                module.fail_json(msg="%s is not valid for the 'chat' type" % item)
-            else:
-                params[item] = module.params[item]
-        elif type == 'inbox':
-            module.fail_json(msg="%s is required for the 'inbox' type" % item)
-
-    # optional params
-    if module.params["tags"]:
-        params['tags'] = module.params["tags"]
-
-    # optional params for the 'inbox' type
-    for item in [ 'from_name', 'reply_to', 'project', 'link' ]:
-        if module.params[item]:
-            if type == 'chat':
-                module.fail_json(msg="%s is not valid for the 'chat' type" % item)
-            else:
-                params[item] = module.params[item]
-
-    # If we're in check mode, just exit pretending like we succeeded
-    if module.check_mode:
-        module.exit_json(changed=False)
-
-    # Send the data to Flowdock
-    data = urllib.urlencode(params)
-    response, info = fetch_url(module, url, data=data)
-    if info['status'] != 200:
-        module.fail_json(msg="unable to send msg: %s" % info['msg'])
-
-    module.exit_json(changed=True, msg=module.params["msg"])
-
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-
-main()
-
diff --git a/library/notification/grove b/library/notification/grove
deleted file mode 100644
index e6bf241bdaa..00000000000
--- a/library/notification/grove
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-DOCUMENTATION = '''
----
-module: grove
-version_added: 1.4
-short_description: Sends a notification to a grove.io channel
-description:
-  - The M(grove) module sends a message for a service to a Grove.io
-    channel.
-options:
-  channel_token:
-    description:
-      - Token of the channel to post to.
-    required: true
-  service:
-    description:
-      - Name of the service (displayed as the "user" in the message)
-    required: false
-    default: ansible
-  message:
-    description:
-      - Message content
-    required: true
-  url:
-    description:
-      - Service URL for the web client
-    required: false
-  icon_url:
-    description:
-      - Icon for the service
-    required: false
-  validate_certs:
-    description:
-      - If C(no), SSL certificates will not be validated. This should only be used
-        on personally controlled sites using self-signed certificates.
- required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 -author: Jonas Pfenniger -''' - -EXAMPLES = ''' -- grove: > - channel_token=6Ph62VBBJOccmtTPZbubiPzdrhipZXtg - service=my-app - message=deployed {{ target }} -''' - -BASE_URL = 'https://grove.io/api/notice/%s/' - -# ============================================================== -# do_notify_grove - -def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None): - my_url = BASE_URL % (channel_token,) - - my_data = dict(service=service, message=message) - if url is not None: - my_data['url'] = url - if icon_url is not None: - my_data['icon_url'] = icon_url - - data = urllib.urlencode(my_data) - response, info = fetch_url(module, my_url, data=data) - if info['status'] != 200: - module.fail_json(msg="failed to send notification: %s" % info['msg']) - -# ============================================================== -# main - -def main(): - module = AnsibleModule( - argument_spec = dict( - channel_token = dict(type='str', required=True), - message = dict(type='str', required=True), - service = dict(type='str', default='ansible'), - url = dict(type='str', default=None), - icon_url = dict(type='str', default=None), - validate_certs = dict(default='yes', type='bool'), - ) - ) - - channel_token = module.params['channel_token'] - service = module.params['service'] - message = module.params['message'] - url = module.params['url'] - icon_url = module.params['icon_url'] - - do_notify_grove(module, channel_token, service, message, url, icon_url) - - # Mission complete - module.exit_json(msg="OK") - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/hipchat b/library/notification/hipchat deleted file mode 100644 index 4ff95b32bf6..00000000000 --- a/library/notification/hipchat +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -DOCUMENTATION = ''' ---- -module: hipchat -version_added: "1.2" -short_description: Send a message to hipchat -description: - - Send a message to hipchat -options: - token: - description: - - API token. - required: true - room: - description: - - ID or name of the room. - required: true - from: - description: - - Name the message will appear be sent from. max 15 characters. - Over 15, will be shorten. - required: false - default: Ansible - msg: - description: - - The message body. - required: true - default: null - color: - description: - - Background color for the message. Default is yellow. - required: false - default: yellow - choices: [ "yellow", "red", "green", "purple", "gray", "random" ] - msg_format: - description: - - message format. html or text. Default is text. - required: false - default: text - choices: [ "text", "html" ] - notify: - description: - - notify or not (change the tab color, play a sound, etc) - required: false - default: 'yes' - choices: [ "yes", "no" ] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 - api: - description: - - API url if using a self-hosted hipchat server - required: false - default: 'https://api.hipchat.com/v1/rooms/message' - version_added: 1.6.0 - - -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] -author: WAKAYAMA Shirou -''' - -EXAMPLES = ''' -- hipchat: token=AAAAAA room=notify msg="Ansible task finished" -''' - -# =========================================== -# HipChat module specific support methods. -# - -MSG_URI = "https://api.hipchat.com/v1/rooms/message" - -def send_msg(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=MSG_URI): - '''sending message to hipchat''' - - params = {} - params['room_id'] = room - params['from'] = msg_from[:15] # max length is 15 - params['message'] = msg - params['message_format'] = msg_format - params['color'] = color - params['api'] = api - - if notify: - params['notify'] = 1 - else: - params['notify'] = 0 - - url = api + "?auth_token=%s" % (token) - data = urllib.urlencode(params) - response, info = fetch_url(module, url, data=data) - if info['status'] == 200: - return response.read() - else: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True), - room=dict(required=True), - msg=dict(required=True), - msg_from=dict(default="Ansible", aliases=['from']), - color=dict(default="yellow", choices=["yellow", "red", "green", - "purple", "gray", "random"]), - msg_format=dict(default="text", choices=["text", "html"]), - notify=dict(default=True, type='bool'), - validate_certs = dict(default='yes', type='bool'), - api = dict(default=MSG_URI), - ), - supports_check_mode=True - ) - - token = module.params["token"] - room = module.params["room"] - msg = module.params["msg"] - msg_from = module.params["msg_from"] - color = module.params["color"] - msg_format = module.params["msg_format"] - notify = module.params["notify"] - api = module.params["api"] - - try: - send_msg(module, token, room, msg_from, msg, msg_format, color, notify, api) - except Exception, e: - module.fail_json(msg="unable to sent msg: %s" % e) - - changed = True - module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/notification/irc b/library/notification/irc deleted file mode 100644 index a90834f820d..00000000000 --- a/library/notification/irc +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Jan-Piet Mens -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
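The hipchat module removed above wraps a single form-encoded POST against HipChat's v1 API. A minimal sketch of the equivalent direct call (Python 2; the token and room name are placeholders):

    import urllib
    import urllib2

    token = 'YOUR_V1_TOKEN'                  # placeholder credential
    params = urllib.urlencode({
        'room_id': 'notify',
        'from': 'Ansible',                   # the module truncates this to 15 characters
        'message': 'deploy finished',
        'message_format': 'text',
        'color': 'green',
        'notify': 1,
    })
    url = 'https://api.hipchat.com/v1/rooms/message?auth_token=%s' % token
    print urllib2.urlopen(url, params).read()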
-#
-
-DOCUMENTATION = '''
----
-module: irc
-version_added: "1.2"
-short_description: Send a message to an IRC channel
-description:
-   - Send a message to an IRC channel. This is a very simplistic implementation.
-options:
-  server:
-    description:
-      - IRC server name/address
-    required: false
-    default: localhost
-  port:
-    description:
-      - IRC server port number
-    required: false
-    default: 6667
-  nick:
-    description:
-      - Nickname. May be shortened, depending on server's NICKLEN setting.
-    required: false
-    default: ansible
-  msg:
-    description:
-      - The message body.
-    required: true
-    default: null
-  color:
-    description:
-      - Text color for the message. ("none" is a valid option in 1.6 or later; in 1.6 and prior, the default color is black, not "none").
-    required: false
-    default: "none"
-    choices: [ "none", "yellow", "red", "green", "blue", "black" ]
-  channel:
-    description:
-      - Channel name
-    required: true
-  key:
-    description:
-      - Channel key
-    required: false
-    version_added: 1.7
-  passwd:
-    description:
-      - Server password
-    required: false
-  timeout:
-    description:
-      - Timeout to use while waiting for successful registration and join
-        messages; this is to prevent an endless loop
-    default: 30
-    version_added: 1.5
-  use_ssl:
-    description:
-      - Designates whether TLS/SSL should be used when connecting to the IRC server
-    default: False
-    version_added: 1.8
-
-# informational: requirements for nodes
-requirements: [ socket ]
-author: Jan-Piet Mens, Matt Martz
-'''
-
-EXAMPLES = '''
-- irc: server=irc.example.net channel="#t1" msg="Hello world"
-
-- local_action: irc port=6669
-                channel="#t1"
-                msg="All finished at {{ ansible_date_time.iso8601 }}"
-                color=red
-                nick=ansibleIRC
-'''
-
-# ===========================================
-# IRC module support methods.
-#
-
-import re
-import socket
-import ssl
-# 'time' is needed for time.time() below, in addition to the sleep import
-import time
-
-from time import sleep
-
-
-def send_msg(channel, msg, server='localhost', port='6667', key=None,
-             nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False):
-    '''send message to IRC'''
-
-    colornumbers = {
-        'black': "01",
-        'red': "04",
-        'green': "09",
-        'yellow': "08",
-        'blue': "12",
-    }
-
-    # fall back to no color code when the name is unknown (or 'none')
-    try:
-        colornumber = colornumbers[color]
-        colortext = "\x03" + colornumber
-    except KeyError:
-        colortext = ""
-
-    message = colortext + msg
-
-    irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    if use_ssl:
-        irc = ssl.wrap_socket(irc)
-    irc.connect((server, int(port)))
-    if passwd:
-        irc.send('PASS %s\r\n' % passwd)
-    irc.send('NICK %s\r\n' % nick)
-    irc.send('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick))
-    motd = ''
-    start = time.time()
-    while 1:
-        motd += irc.recv(1024)
-        # The server might send back a shorter nick than we specified (due to NICKLEN),
-        # so grab that and use it from now on (assuming we find the 00[1-4] response).
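-        # 001-004 are the RPL_WELCOME..RPL_MYINFO numerics an IRC server sends
-        # once registration succeeds; the nick echoed in those replies is the
-        # one the server actually assigned, so it is the one to match on later.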
-        match = re.search('^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
-        if match:
-            nick = match.group('nick')
-            break
-        elif time.time() - start > timeout:
-            raise Exception('Timeout waiting for IRC server welcome response')
-        sleep(0.5)
-
-    if key:
-        irc.send('JOIN %s %s\r\n' % (channel, key))
-    else:
-        irc.send('JOIN %s\r\n' % channel)
-
-    join = ''
-    start = time.time()
-    while 1:
-        join += irc.recv(1024)
-        if re.search('^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M):
-            break
-        elif time.time() - start > timeout:
-            raise Exception('Timeout waiting for IRC JOIN response')
-        sleep(0.5)
-
-    irc.send('PRIVMSG %s :%s\r\n' % (channel, message))
-    sleep(1)
-    irc.send('PART %s\r\n' % channel)
-    irc.send('QUIT\r\n')
-    sleep(1)
-    irc.close()
-
-# ===========================================
-# Main
-#
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            server=dict(default='localhost'),
-            port=dict(default=6667),
-            nick=dict(default='ansible'),
-            msg=dict(required=True),
-            color=dict(default="none", choices=["yellow", "red", "green",
-                                                "blue", "black", "none"]),
-            channel=dict(required=True),
-            key=dict(),
-            passwd=dict(),
-            timeout=dict(type='int', default=30),
-            use_ssl=dict(type='bool', default=False)
-        ),
-        supports_check_mode=True
-    )
-
-    server = module.params["server"]
-    port = module.params["port"]
-    nick = module.params["nick"]
-    msg = module.params["msg"]
-    color = module.params["color"]
-    channel = module.params["channel"]
-    key = module.params["key"]
-    passwd = module.params["passwd"]
-    timeout = module.params["timeout"]
-    use_ssl = module.params["use_ssl"]
-
-    try:
-        send_msg(channel, msg, server, port, key, nick, color, passwd, timeout, use_ssl)
-    except Exception, e:
-        module.fail_json(msg="unable to send to IRC: %s" % e)
-
-    module.exit_json(changed=False, channel=channel, nick=nick,
-                     msg=msg)
-
-# import module snippets
-from ansible.module_utils.basic import *
-main()
diff --git a/library/notification/jabber b/library/notification/jabber
deleted file mode 100644
index 8a7eed37b33..00000000000
--- a/library/notification/jabber
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-DOCUMENTATION = '''
----
-version_added: "1.2"
-module: jabber
-short_description: Send a message to jabber user or chat room
-description:
-   - Send a message to jabber
-options:
-  user:
-    description:
-      User as which to connect
-    required: true
-  password:
-    description:
-      password for user to connect
-    required: true
-  to:
-    description:
-      user ID or name of the room, when using room use a slash to indicate your nick.
-    required: true
-  msg:
-    description:
-      - The message body.
- required: true - default: null - host: - description: - host to connect, overrides user info - required: false - port: - description: - port to connect to, overrides default - required: false - default: 5222 - encoding: - description: - message encoding - required: false - -# informational: requirements for nodes -requirements: [ xmpp ] -author: Brian Coca -''' - -EXAMPLES = ''' -# send a message to a user -- jabber: user=mybot@example.net - password=secret - to=friend@example.net - msg="Ansible task finished" - -# send a message to a room -- jabber: user=mybot@example.net - password=secret - to=mychaps@conference.example.net/ansiblebot - msg="Ansible task finished" - -# send a message, specifying the host and port -- jabber user=mybot@example.net - host=talk.example.net - port=5223 - password=secret - to=mychaps@example.net - msg="Ansible task finished" -''' - -import os -import re -import time - -HAS_XMPP = True -try: - import xmpp -except ImportError: - HAS_XMPP = False - -def main(): - - module = AnsibleModule( - argument_spec=dict( - user=dict(required=True), - password=dict(required=True), - to=dict(required=True), - msg=dict(required=True), - host=dict(required=False), - port=dict(required=False,default=5222), - encoding=dict(required=False), - ), - supports_check_mode=True - ) - - if not HAS_XMPP: - module.fail_json(msg="xmpp is not installed") - - jid = xmpp.JID(module.params['user']) - user = jid.getNode() - server = jid.getDomain() - port = module.params['port'] - password = module.params['password'] - try: - to, nick = module.params['to'].split('/', 1) - except ValueError: - to, nick = module.params['to'], None - - if module.params['host']: - host = module.params['host'] - else: - host = server - if module.params['encoding']: - xmpp.simplexml.ENCODING = params['encoding'] - - msg = xmpp.protocol.Message(body=module.params['msg']) - - try: - conn=xmpp.Client(server) - if not conn.connect(server=(host,port)): - module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server)) - if not conn.auth(user,password,'Ansible'): - module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user,server)) - # some old servers require this, also the sleep following send - conn.sendInitPresence(requestRoster=0) - - if nick: # sending to room instead of user, need to join - msg.setType('groupchat') - msg.setTag('x', namespace='http://jabber.org/protocol/muc#user') - conn.send(xmpp.Presence(to=module.params['to'])) - time.sleep(1) - else: - msg.setType('chat') - - msg.setTo(to) - if not module.check_mode: - conn.send(msg) - time.sleep(1) - conn.disconnect() - except Exception, e: - module.fail_json(msg="unable to send msg: %s" % e) - - module.exit_json(changed=False, to=to, user=user, msg=msg.getBody()) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/mail b/library/notification/mail deleted file mode 100644 index 34cd3a09bf3..00000000000 --- a/library/notification/mail +++ /dev/null @@ -1,252 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2012 Dag Wieers -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ ---- -author: Dag Wieers -module: mail -short_description: Send an email -description: - - This module is useful for sending emails from playbooks. - - One may wonder why automate sending emails? In complex environments - there are from time to time processes that cannot be automated, either - because you lack the authority to make it so, or because not everyone - agrees to a common approach. - - If you cannot automate a specific step, but the step is non-blocking, - sending out an email to the responsible party to make him perform his - part of the bargain is an elegant way to put the responsibility in - someone else's lap. - - Of course sending out a mail can be equally useful as a way to notify - one or more people in a team that a specific action has been - (successfully) taken. -version_added: "0.8" -options: - from: - description: - - The email-address the mail is sent from. May contain address and phrase. - default: root - required: false - to: - description: - - The email-address(es) the mail is being sent to. This is - a comma-separated list, which may contain address and phrase portions. - default: root - required: false - cc: - description: - - The email-address(es) the mail is being copied to. This is - a comma-separated list, which may contain address and phrase portions. - required: false - bcc: - description: - - The email-address(es) the mail is being 'blind' copied to. This is - a comma-separated list, which may contain address and phrase portions. - required: false - subject: - description: - - The subject of the email being sent. - aliases: [ msg ] - required: true - body: - description: - - The body of the email being sent. - default: $subject - required: false - host: - description: - - The mail server - default: 'localhost' - required: false - port: - description: - - The mail server port - default: '25' - required: false - version_added: "1.0" - attach: - description: - - A space-separated list of pathnames of files to attach to the message. - Attached files will have their content-type set to C(application/octet-stream). - default: null - required: false - version_added: "1.0" - headers: - description: - - A vertical-bar-separated list of headers which should be added to the message. - Each individual header is specified as C(header=value) (see example below). - default: null - required: false - version_added: "1.0" - charset: - description: - - The character set of email being sent - default: 'us-ascii' - required: false -""" - -EXAMPLES = ''' -# Example playbook sending mail to root -- local_action: mail msg='System {{ ansible_hostname }} has been successfully provisioned.' - -# Send e-mail to a bunch of users, attaching files -- local_action: mail - host='127.0.0.1' - port=2025 - subject="Ansible-report" - body="Hello, this is an e-mail. 
I hope you like it ;-)" - from="jane@example.net (Jane Jolie)" - to="John Doe , Suzie Something " - cc="Charlie Root " - attach="/etc/group /tmp/pavatar2.png" - headers=Reply-To=john@example.com|X-Special="Something or other" - charset=utf8 -''' - -import os -import sys -import smtplib - -try: - from email import encoders - import email.utils - from email.utils import parseaddr, formataddr - from email.mime.base import MIMEBase - from mail.mime.multipart import MIMEMultipart - from email.mime.text import MIMEText -except ImportError: - from email import Encoders as encoders - import email.Utils - from email.Utils import parseaddr, formataddr - from email.MIMEBase import MIMEBase - from email.MIMEMultipart import MIMEMultipart - from email.MIMEText import MIMEText - -def main(): - - module = AnsibleModule( - argument_spec = dict( - host = dict(default='localhost'), - port = dict(default='25'), - sender = dict(default='root', aliases=['from']), - to = dict(default='root', aliases=['recipients']), - cc = dict(default=None), - bcc = dict(default=None), - subject = dict(required=True, aliases=['msg']), - body = dict(default=None), - attach = dict(default=None), - headers = dict(default=None), - charset = dict(default='us-ascii') - ) - ) - - host = module.params.get('host') - port = module.params.get('port') - sender = module.params.get('sender') - recipients = module.params.get('to') - copies = module.params.get('cc') - blindcopies = module.params.get('bcc') - subject = module.params.get('subject') - body = module.params.get('body') - attach_files = module.params.get('attach') - headers = module.params.get('headers') - charset = module.params.get('charset') - - sender_phrase, sender_addr = parseaddr(sender) - - if not body: - body = subject - - try: - smtp = smtplib.SMTP(host, port=int(port)) - except Exception, e: - module.fail_json(rc=1, msg='Failed to send mail to server %s on port %s: %s' % (host, port, e)) - - - msg = MIMEMultipart() - msg['Subject'] = subject - msg['From'] = formataddr((sender_phrase, sender_addr)) - msg.preamble = "Multipart message" - - if headers is not None: - for hdr in [x.strip() for x in headers.split('|')]: - try: - h_key, h_val = hdr.split('=') - msg.add_header(h_key, h_val) - except: - pass - - if 'X-Mailer' not in msg: - msg.add_header('X-Mailer', "Ansible") - - to_list = [] - cc_list = [] - addr_list = [] - - if recipients is not None: - for addr in [x.strip() for x in recipients.split(',')]: - to_list.append( formataddr( parseaddr(addr)) ) - addr_list.append( parseaddr(addr)[1] ) # address only, w/o phrase - if copies is not None: - for addr in [x.strip() for x in copies.split(',')]: - cc_list.append( formataddr( parseaddr(addr)) ) - addr_list.append( parseaddr(addr)[1] ) # address only, w/o phrase - if blindcopies is not None: - for addr in [x.strip() for x in blindcopies.split(',')]: - addr_list.append( parseaddr(addr)[1] ) - - if len(to_list) > 0: - msg['To'] = ", ".join(to_list) - if len(cc_list) > 0: - msg['Cc'] = ", ".join(cc_list) - - part = MIMEText(body + "\n\n", _charset=charset) - msg.attach(part) - - if attach_files is not None: - for file in attach_files.split(): - try: - fp = open(file, 'rb') - - part = MIMEBase('application', 'octet-stream') - part.set_payload(fp.read()) - fp.close() - - encoders.encode_base64(part) - - part.add_header('Content-disposition', 'attachment', filename=os.path.basename(file)) - msg.attach(part) - except Exception, e: - module.fail_json(rc=1, msg="Failed to send mail: can't attach file %s: %s" % (file, e)) - 
sys.exit() - - composed = msg.as_string() - - try: - smtp.sendmail(sender_addr, set(addr_list), composed) - except Exception, e: - module.fail_json(rc=1, msg='Failed to send mail to %s: %s' % (", ".join(addr_list), e)) - - smtp.quit() - - module.exit_json(changed=False) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/mqtt b/library/notification/mqtt deleted file mode 100644 index d701bd9348a..00000000000 --- a/library/notification/mqtt +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, 2014, Jan-Piet Mens -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: mqtt -short_description: Publish a message on an MQTT topic for the IoT -version_added: "1.2" -description: - - Publish a message on an MQTT topic. -options: - server: - description: - - MQTT broker address/name - required: false - default: localhost - port: - description: - - MQTT broker port number - required: false - default: 1883 - username: - description: - - Username to authenticate against the broker. - required: false - password: - description: - - Password for C(username) to authenticate against the broker. - required: false - client_id: - description: - - MQTT client identifier - required: false - default: hostname + pid - topic: - description: - - MQTT topic name - required: true - default: null - payload: - description: - - Payload. The special string C("None") may be used to send a NULL - (i.e. empty) payload which is useful to simply notify with the I(topic) - or to clear previously retained messages. - required: true - default: null - qos: - description: - - QoS (Quality of Service) - required: false - default: 0 - choices: [ "0", "1", "2" ] - retain: - description: - - Setting this flag causes the broker to retain (i.e. keep) the message so that - applications that subsequently subscribe to the topic can received the last - retained message immediately. - required: false - default: False - -# informational: requirements for nodes -requirements: [ mosquitto ] -notes: - - This module requires a connection to an MQTT broker such as Mosquitto - U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.python.org/pypi/paho-mqtt)). -author: Jan-Piet Mens -''' - -EXAMPLES = ''' -- local_action: mqtt - topic=service/ansible/{{ ansible_hostname }} - payload="Hello at {{ ansible_date_time.iso8601 }}" - qos=0 - retain=false - client_id=ans001 -''' - -# =========================================== -# MQTT module support methods. 
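main() below hands the actual network work to paho-mqtt's one-shot publish helper. A minimal sketch of the equivalent direct call (the broker, topic, and credentials are placeholders; auth can be omitted for anonymous brokers):

    import paho.mqtt.publish as mqtt_publish

    mqtt_publish.single('service/ansible/web01',              # topic
                        payload='Hello at 2014-11-25T12:00:00Z',
                        qos=0,
                        retain=False,
                        client_id='ans001',
                        hostname='localhost',
                        port=1883,
                        auth={'username': 'user', 'password': 'secret'})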
-# - -HAS_PAHOMQTT = True -try: - import socket - import paho.mqtt.publish as mqtt -except ImportError: - HAS_PAHOMQTT = False - -# =========================================== -# Main -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - server = dict(default = 'localhost'), - port = dict(default = 1883), - topic = dict(required = True), - payload = dict(required = True), - client_id = dict(default = None), - qos = dict(default="0", choices=["0", "1", "2"]), - retain = dict(default=False, type='bool'), - username = dict(default = None), - password = dict(default = None), - ), - supports_check_mode=True - ) - - if not HAS_PAHOMQTT: - module.fail_json(msg="Paho MQTT is not installed") - - server = module.params.get("server", 'localhost') - port = module.params.get("port", 1883) - topic = module.params.get("topic") - payload = module.params.get("payload") - client_id = module.params.get("client_id", '') - qos = int(module.params.get("qos", 0)) - retain = module.params.get("retain") - username = module.params.get("username", None) - password = module.params.get("password", None) - - if client_id is None: - client_id = "%s_%s" % (socket.getfqdn(), os.getpid()) - - if payload and payload == 'None': - payload = None - - auth=None - if username is not None: - auth = { 'username' : username, 'password' : password } - - try: - rc = mqtt.single(topic, payload, - qos=qos, - retain=retain, - client_id=client_id, - hostname=server, - port=port, - auth=auth) - except Exception, e: - module.fail_json(msg="unable to publish to MQTT broker %s" % (e)) - - module.exit_json(changed=False, topic=topic) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/nexmo b/library/notification/nexmo deleted file mode 100644 index d4898c40cdb..00000000000 --- a/library/notification/nexmo +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Matt Martz -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ -module: nexmo -short_description: Send a SMS via nexmo -description: - - Send a SMS message via nexmo -version_added: 1.6 -author: Matt Martz -options: - api_key: - description: - - Nexmo API Key - required: true - api_secret: - description: - - Nexmo API Secret - required: true - src: - description: - - Nexmo Number to send from - required: true - dest: - description: - - Phone number(s) to send SMS message to - required: true - msg: - description: - - Message to text to send. Messages longer than 160 characters will be - split into multiple messages - required: true - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
-    required: false
-    default: 'yes'
-    choices:
-      - 'yes'
-      - 'no'
-"""
-
-EXAMPLES = """
-- name: Send notification message via Nexmo
-  local_action:
-    module: nexmo
-    api_key: 640c8a53
-    api_secret: 0ce239a6
-    src: 12345678901
-    dest:
-      - 10987654321
-      - 16789012345
-    msg: "{{ inventory_hostname }} completed"
-"""
-
-
-NEXMO_API = 'https://rest.nexmo.com/sms/json'
-
-
-def send_msg(module):
-    failed = list()
-    responses = dict()
-    msg = {
-        'api_key': module.params.get('api_key'),
-        'api_secret': module.params.get('api_secret'),
-        'from': module.params.get('src'),
-        'text': module.params.get('msg')
-    }
-    for number in module.params.get('dest'):
-        msg['to'] = number
-        url = "%s?%s" % (NEXMO_API, urllib.urlencode(msg))
-
-        headers = dict(Accept='application/json')
-        response, info = fetch_url(module, url, headers=headers)
-        if info['status'] != 200:
-            failed.append(number)
-            responses[number] = dict(failed=True)
-
-        try:
-            responses[number] = json.load(response)
-        except:
-            failed.append(number)
-            responses[number] = dict(failed=True)
-        else:
-            for message in responses[number]['messages']:
-                if int(message['status']) != 0:
-                    failed.append(number)
-                    responses[number] = dict(failed=True, **responses[number])
-
-    if failed:
-        msg = 'One or more messages failed to send'
-    else:
-        msg = ''
-
-    module.exit_json(failed=bool(failed), msg=msg, changed=False,
-                     responses=responses)
-
-
-def main():
-    argument_spec = url_argument_spec()
-    argument_spec.update(
-        dict(
-            api_key=dict(required=True, no_log=True),
-            api_secret=dict(required=True, no_log=True),
-            src=dict(required=True, type='int'),
-            dest=dict(required=True, type='list'),
-            msg=dict(required=True),
-        ),
-    )
-
-    module = AnsibleModule(
-        argument_spec=argument_spec
-    )
-
-    send_msg(module)
-
-
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-
-main()
diff --git a/library/notification/osx_say b/library/notification/osx_say
deleted file mode 100644
index 39e3da88c19..00000000000
--- a/library/notification/osx_say
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2013, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-module: osx_say
-version_added: "1.2"
-short_description: Makes an OS X computer speak.
-description:
-   - Makes an OS X computer speak! Amuse your friends, annoy your coworkers!
-notes:
-   - If you like this module, you may also be interested in the osx_say callback in the plugins/ directory of the source checkout.
-options: - msg: - description: - What to say - required: true - voice: - description: - What voice to use - required: false -requirements: [ say ] -author: Michael DeHaan -''' - -EXAMPLES = ''' -- local_action: osx_say msg="{{inventory_hostname}} is all done" voice=Zarvox -''' - -DEFAULT_VOICE='Trinoids' - -def say(module, msg, voice): - module.run_command(["/usr/bin/say", msg, "--voice=%s" % (voice)], check_rc=True) - -def main(): - - module = AnsibleModule( - argument_spec=dict( - msg=dict(required=True), - voice=dict(required=False, default=DEFAULT_VOICE), - ), - supports_check_mode=False - ) - - if not os.path.exists("/usr/bin/say"): - module.fail_json(msg="/usr/bin/say is not installed") - - msg = module.params['msg'] - voice = module.params['voice'] - - say(module, msg, voice) - - module.exit_json(msg=msg, changed=False) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/slack b/library/notification/slack deleted file mode 100644 index 176d6b338fb..00000000000 --- a/library/notification/slack +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Ramon de la Fuente -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ -module: slack -short_description: Send Slack notifications -description: - - The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration -version_added: 1.6 -author: Ramon de la Fuente -options: - domain: - description: - - Slack (sub)domain for your environment without protocol. - (i.e. C(future500.slack.com)) - required: true - token: - description: - - Slack integration token - required: true - msg: - description: - - Message to send. - required: true - channel: - description: - - Channel to send the message to. If absent, the message goes to the channel selected for the I(token). - required: false - username: - description: - - This is the sender of the message. - required: false - default: ansible - icon_url: - description: - - Url for the message sender's icon (default C(http://www.ansible.com/favicon.ico)) - required: false - icon_emoji: - description: - - Emoji for the message sender. See Slack documentation for options. - (if I(icon_emoji) is set, I(icon_url) will not be used) - required: false - link_names: - description: - - Automatically create links for channels and usernames in I(msg). - required: false - default: 1 - choices: - - 1 - - 0 - parse: - description: - - Setting for the message parser at Slack - required: false - choices: - - 'full' - - 'none' - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- required: false - default: 'yes' - choices: - - 'yes' - - 'no' -""" - -EXAMPLES = """ -- name: Send notification message via Slack - local_action: - module: slack - domain: future500.slack.com - token: thetokengeneratedbyslack - msg: "{{ inventory_hostname }} completed" - -- name: Send notification message via Slack all options - local_action: - module: slack - domain: future500.slack.com - token: thetokengeneratedbyslack - msg: "{{ inventory_hostname }} completed" - channel: "#ansible" - username: "Ansible on {{ inventory_hostname }}" - icon_url: "http://www.example.com/some-image-file.png" - link_names: 0 - parse: 'none' - -""" - - -SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' - -def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse): - payload = dict(text=text) - - if channel is not None: - payload['channel'] = channel if (channel[0] == '#') else '#'+channel - if username is not None: - payload['username'] = username - if icon_emoji is not None: - payload['icon_emoji'] = icon_emoji - else: - payload['icon_url'] = icon_url - if link_names is not None: - payload['link_names'] = link_names - if parse is not None: - payload['parse'] = parse - - payload="payload=" + module.jsonify(payload) - return payload - -def do_notify_slack(module, domain, token, payload): - slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, token) - - response, info = fetch_url(module, slack_incoming_webhook, data=payload) - if info['status'] != 200: - obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, '[obscured]') - module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg'])) - -def main(): - module = AnsibleModule( - argument_spec = dict( - domain = dict(type='str', required=True), - token = dict(type='str', required=True), - msg = dict(type='str', required=True), - channel = dict(type='str', default=None), - username = dict(type='str', default='Ansible'), - icon_url = dict(type='str', default='http://www.ansible.com/favicon.ico'), - icon_emoji = dict(type='str', default=None), - link_names = dict(type='int', default=1, choices=[0,1]), - parse = dict(type='str', default=None, choices=['none', 'full']), - - validate_certs = dict(default='yes', type='bool'), - ) - ) - - domain = module.params['domain'] - token = module.params['token'] - text = module.params['msg'] - channel = module.params['channel'] - username = module.params['username'] - icon_url = module.params['icon_url'] - icon_emoji = module.params['icon_emoji'] - link_names = module.params['link_names'] - parse = module.params['parse'] - - payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse) - do_notify_slack(module, domain, token, payload) - - module.exit_json(msg="OK") - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() \ No newline at end of file diff --git a/library/notification/sns b/library/notification/sns deleted file mode 100644 index f2ed178554e..00000000000 --- a/library/notification/sns +++ /dev/null @@ -1,190 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Michael J. Schultz -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ -module: sns -short_description: Send Amazon Simple Notification Service (SNS) messages -description: - - The M(sns) module sends notifications to a topic on your Amazon SNS account -version_added: 1.6 -author: Michael J. Schultz -options: - msg: - description: - - Default message to send. - required: true - aliases: [ "default" ] - subject: - description: - - Subject line for email delivery. - required: false - topic: - description: - - The topic you want to publish to. - required: true - email: - description: - - Message to send to email-only subscription - required: false - sqs: - description: - - Message to send to SQS-only subscription - required: false - sms: - description: - - Message to send to SMS-only subscription - required: false - http: - description: - - Message to send to HTTP-only subscription - required: false - https: - description: - - Message to send to HTTPS-only subscription - required: false - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key'] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key'] - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - -requirements: [ "boto" ] -author: Michael J. Schultz -""" - -EXAMPLES = """ -- name: Send default notification message via SNS - local_action: - module: sns - msg: "{{ inventory_hostname }} has completed the play." - subject: "Deploy complete!" - topic: "deploy" - -- name: Send notification messages via SNS with short message for SMS - local_action: - module: sns - msg: "{{ inventory_hostname }} has completed the play." - sms: "deployed!" - subject: "Deploy complete!" 
- topic: "deploy" -""" - -import sys - -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -try: - import boto - import boto.sns -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -def arn_topic_lookup(connection, short_topic): - response = connection.get_all_topics() - result = response[u'ListTopicsResponse'][u'ListTopicsResult'] - # topic names cannot have colons, so this captures the full topic name - lookup_topic = ':{}'.format(short_topic) - for topic in result[u'Topics']: - if topic[u'TopicArn'].endswith(lookup_topic): - return topic[u'TopicArn'] - return None - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - msg=dict(type='str', required=True, aliases=['default']), - subject=dict(type='str', default=None), - topic=dict(type='str', required=True), - email=dict(type='str', default=None), - sqs=dict(type='str', default=None), - sms=dict(type='str', default=None), - http=dict(type='str', default=None), - https=dict(type='str', default=None), - ) - ) - - module = AnsibleModule(argument_spec=argument_spec) - - msg = module.params['msg'] - subject = module.params['subject'] - topic = module.params['topic'] - email = module.params['email'] - sqs = module.params['sqs'] - sms = module.params['sms'] - http = module.params['http'] - https = module.params['https'] - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: - module.fail_json(msg="region must be specified") - try: - connection = connect_to_aws(boto.sns, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - - # .publish() takes full ARN topic id, but I'm lazy and type shortnames - # so do a lookup (topics cannot contain ':', so thats the decider) - if ':' in topic: - arn_topic = topic - else: - arn_topic = arn_topic_lookup(connection, topic) - - if not arn_topic: - module.fail_json(msg='Could not find topic: {}'.format(topic)) - - dict_msg = {'default': msg} - if email: - dict_msg.update(email=email) - if sqs: - dict_msg.update(sqs=sqs) - if sms: - dict_msg.update(sms=sms) - if http: - dict_msg.update(http=http) - if https: - dict_msg.update(https=https) - - json_msg = json.dumps(dict_msg) - try: - connection.publish(topic=arn_topic, subject=subject, - message_structure='json', message=json_msg) - except boto.exception.BotoServerError, e: - module.fail_json(msg=str(e)) - - module.exit_json(msg="OK") - -main() diff --git a/library/notification/twilio b/library/notification/twilio deleted file mode 100644 index 8969c28aa50..00000000000 --- a/library/notification/twilio +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Matt Makai -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -version_added: "1.6" -module: twilio -short_description: Sends a text message to a mobile phone through Twilio. 
-description: - - Sends a text message to a phone number through an the Twilio SMS service. -notes: - - Like the other notification modules, this one requires an external - dependency to work. In this case, you'll need a Twilio account with - a purchased or verified phone number to send the text message. -options: - account_sid: - description: - user's account id for Twilio found on the account page - required: true - auth_token: - description: user's authentication token for Twilio found on the account page - required: true - msg: - description: - the body of the text message - required: true - to_number: - description: - what phone number to send the text message to, format +15551112222 - required: true - from_number: - description: - what phone number to send the text message from, format +15551112222 - required: true - -requirements: [ urllib, urllib2 ] -author: Matt Makai -''' - -EXAMPLES = ''' -# send a text message from the local server about the build status to (555) 303 5681 -# note: you have to have purchased the 'from_number' on your Twilio account -- local_action: text msg="All servers with webserver role are now configured." - account_sid={{ twilio_account_sid }} - auth_token={{ twilio_auth_token }} - from_number=+15552014545 to_number=+15553035681 - -# send a text message from a server to (555) 111 3232 -# note: you have to have purchased the 'from_number' on your Twilio account -- text: msg="This server's configuration is now complete." - account_sid={{ twilio_account_sid }} - auth_token={{ twilio_auth_token }} - from_number=+15553258899 to_number=+15551113232 - -''' - -# ======================================= -# text module support methods -# -try: - import urllib, urllib2 -except ImportError: - module.fail_json(msg="urllib and urllib2 are required") - -import base64 - - -def post_text(module, account_sid, auth_token, msg, from_number, to_number): - URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ - % (account_sid,) - AGENT = "Ansible/1.5" - - data = {'From':from_number, 'To':to_number, 'Body':msg} - encoded_data = urllib.urlencode(data) - request = urllib2.Request(URI) - base64string = base64.encodestring('%s:%s' % \ - (account_sid, auth_token)).replace('\n', '') - request.add_header('User-Agent', AGENT) - request.add_header('Content-type', 'application/x-www-form-urlencoded') - request.add_header('Accept', 'application/ansible') - request.add_header('Authorization', 'Basic %s' % base64string) - return urllib2.urlopen(request, encoded_data) - - -# ======================================= -# Main -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - account_sid=dict(required=True), - auth_token=dict(required=True), - msg=dict(required=True), - from_number=dict(required=True), - to_number=dict(required=True), - ), - supports_check_mode=True - ) - - account_sid = module.params['account_sid'] - auth_token = module.params['auth_token'] - msg = module.params['msg'] - from_number = module.params['from_number'] - to_number = module.params['to_number'] - - try: - response = post_text(module, account_sid, auth_token, msg, - from_number, to_number) - except Exception, e: - module.fail_json(msg="unable to send text message to %s" % to_number) - - module.exit_json(msg=msg, changed=False) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/typetalk b/library/notification/typetalk deleted file mode 100644 index b987acbe837..00000000000 --- a/library/notification/typetalk +++ /dev/null @@ 
-1,116 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -DOCUMENTATION = ''' ---- -module: typetalk -version_added: "1.6" -short_description: Send a message to typetalk -description: - - Send a message to typetalk using typetalk API ( http://developers.typetalk.in/ ) -options: - client_id: - description: - - OAuth2 client ID - required: true - client_secret: - description: - - OAuth2 client secret - required: true - topic: - description: - - topic id to post message - required: true - msg: - description: - - message body - required: true -requirements: [ urllib, urllib2, json ] -author: Takashi Someda -''' - -EXAMPLES = ''' -- typetalk: client_id=12345 client_secret=12345 topic=1 msg="install completed" -''' - -try: - import urllib -except ImportError: - urllib = None - -try: - import urllib2 -except ImportError: - urllib2 = None - -try: - import json -except ImportError: - json = None - - -def do_request(url, params, headers={}): - data = urllib.urlencode(params) - headers = dict(headers, **{ - 'User-Agent': 'Ansible/typetalk module', - }) - return urllib2.urlopen(urllib2.Request(url, data, headers)) - - -def get_access_token(client_id, client_secret): - params = { - 'client_id': client_id, - 'client_secret': client_secret, - 'grant_type': 'client_credentials', - 'scope': 'topic.post' - } - res = do_request('https://typetalk.in/oauth2/access_token', params) - return json.load(res)['access_token'] - - -def send_message(client_id, client_secret, topic, msg): - """ - send message to typetalk - """ - try: - access_token = get_access_token(client_id, client_secret) - url = 'https://typetalk.in/api/v1/topics/%d' % topic - headers = { - 'Authorization': 'Bearer %s' % access_token, - } - do_request(url, {'message': msg}, headers) - return True, {'access_token': access_token} - except urllib2.HTTPError, e: - return False, e - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - client_id=dict(required=True), - client_secret=dict(required=True), - topic=dict(required=True, type='int'), - msg=dict(required=True), - ), - supports_check_mode=False - ) - - if not (urllib and urllib2 and json): - module.fail_json(msg="urllib, urllib2 and json modules are required") - - client_id = module.params["client_id"] - client_secret = module.params["client_secret"] - topic = module.params["topic"] - msg = module.params["msg"] - - res, error = send_message(client_id, client_secret, topic, msg) - if not res: - module.fail_json(msg='fail to send message with response code %s' % error.code) - - module.exit_json(changed=True, topic=topic, msg=msg) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/apt b/library/packaging/apt deleted file mode 100755 index b4b3f078a88..00000000000 --- a/library/packaging/apt +++ /dev/null @@ -1,561 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Flowroute LLC -# Written by Matthew Williams -# Based on yum module written by Seth Vidal -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see <http://www.gnu.org/licenses/>. -# - -DOCUMENTATION = ''' ---- -module: apt -short_description: Manages apt packages -description: - - Manages I(apt) packages (such as for Debian/Ubuntu). -version_added: "0.0.2" -options: - name: - description: - - A package name, like C(foo), or package specifier with version, like C(foo=1.0). Wildcards (fnmatch) like apt* are also supported. - required: false - default: null - state: - description: - - Indicates the desired package state. C(latest) ensures that the latest version is installed. - required: false - default: present - choices: [ "latest", "absent", "present" ] - update_cache: - description: - - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step. - required: false - default: no - choices: [ "yes", "no" ] - cache_valid_time: - description: - - If C(update_cache) is specified and the last run is less than or equal to I(cache_valid_time) seconds ago, the C(update_cache) step gets skipped. - required: false - default: no - purge: - description: - - Will force purging of configuration files if the module state is set to I(absent). - required: false - default: no - choices: [ "yes", "no" ] - default_release: - description: - - Corresponds to the C(-t) option for I(apt) and sets pin priorities. - required: false - default: null - install_recommends: - description: - - Corresponds to the C(--no-install-recommends) option for I(apt). Default behavior (C(yes)) replicates apt's default behavior; C(no) does not install recommended packages. Suggested packages are never installed. - required: false - default: yes - choices: [ "yes", "no" ] - force: - description: - - If C(yes), force installs/removes. - required: false - default: "no" - choices: [ "yes", "no" ] - upgrade: - description: - - 'If yes or safe, performs an aptitude safe-upgrade.' - - 'If full, performs an aptitude full-upgrade.' - - 'If dist, performs an apt-get dist-upgrade.' - - 'Note: This does not upgrade a specific package, use state=latest for that.' - version_added: "1.1" - required: false - default: "yes" - choices: [ "yes", "safe", "full", "dist"] - dpkg_options: - description: - - Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"' - - Options should be supplied as a comma-separated list - required: false - default: 'force-confdef,force-confold' - deb: - description: - - Path to a .deb package on the remote machine. - required: false - version_added: "1.6" -requirements: [ python-apt, aptitude ] -author: Matthew Williams -notes: - - Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) require C(aptitude), otherwise - C(apt-get) suffices.
-''' - -EXAMPLES = ''' -# Update the repository cache and install "foo" package -- apt: name=foo update_cache=yes - -# Remove "foo" package -- apt: name=foo state=absent - -# Install the package "foo" -- apt: name=foo state=present - -# Install the version '1.00' of package "foo" -- apt: name=foo=1.00 state=present - -# Update the repository cache and update package "nginx" to latest version using default release squeeze-backports -- apt: name=nginx state=latest default_release=squeeze-backports update_cache=yes - -# Install latest version of "openjdk-6-jdk" ignoring "install-recommends" -- apt: name=openjdk-6-jdk state=latest install_recommends=no - -# Update all packages to the latest version -- apt: upgrade=dist - -# Run the equivalent of "apt-get update" as a separate step -- apt: update_cache=yes - -# Only run "update_cache=yes" if the last one is more than 3600 seconds ago -- apt: update_cache=yes cache_valid_time=3600 - -# Pass options to dpkg on run -- apt: upgrade=dist update_cache=yes dpkg_options='force-confold,force-confdef' - -# Install a .deb package -- apt: deb=/tmp/mypackage.deb -''' - - -import traceback -# added to stave off future warnings about apt api -import warnings -warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning) - -import os -import datetime -import fnmatch - -# APT related constants -APT_ENV_VARS = dict( - DEBIAN_FRONTEND = 'noninteractive', - DEBIAN_PRIORITY = 'critical' -) - -DPKG_OPTIONS = 'force-confdef,force-confold' -APT_GET_ZERO = "0 upgraded, 0 newly installed" -APTITUDE_ZERO = "0 packages upgraded, 0 newly installed" -APT_LISTS_PATH = "/var/lib/apt/lists" -APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp" - -HAS_PYTHON_APT = True -try: - import apt - import apt.debfile - import apt_pkg -except ImportError: - HAS_PYTHON_APT = False - -def package_split(pkgspec): - parts = pkgspec.split('=') - if len(parts) > 1: - return parts[0], parts[1] - else: - return parts[0], None - -def package_status(m, pkgname, version, cache, state): - try: - # get the package from the cache, as well as the - # low-level apt_pkg.Package object which contains - # state fields not directly accessible from the - # higher-level apt.package.Package object.
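- # (package_status() returns three booleans: package_is_installed, - # package_is_upgradable and has_files; its callers install() and remove() below unpack them)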
- pkg = cache[pkgname] - ll_pkg = cache._cache[pkgname] # the low-level package object - except KeyError: - if state == 'install': - if cache.get_providing_packages(pkgname): - return False, True, False - m.fail_json(msg="No package matching '%s' is available" % pkgname) - else: - return False, False, False - try: - has_files = len(pkg.installed_files) > 0 - except UnicodeDecodeError: - has_files = True - except AttributeError: - has_files = False # older python-apt cannot be used to determine non-purged - - try: - package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED - except AttributeError: # python-apt 0.7.X has very weak low-level object - try: - # might not be necessary as python-apt post-0.7.X should have current_state property - package_is_installed = pkg.is_installed - except AttributeError: - # assume older version of python-apt is installed - package_is_installed = pkg.isInstalled - - if version and package_is_installed: - try: - installed_version = pkg.installed.version - except AttributeError: - installed_version = pkg.installedVersion - return package_is_installed and fnmatch.fnmatch(installed_version, version), False, has_files - else: - try: - package_is_upgradable = pkg.is_upgradable - except AttributeError: - # assume older version of python-apt is installed - package_is_upgradable = pkg.isUpgradable - return package_is_installed, package_is_upgradable, has_files - -def expand_dpkg_options(dpkg_options_compressed): - options_list = dpkg_options_compressed.split(',') - dpkg_options = "" - for dpkg_option in options_list: - dpkg_options = '%s -o "Dpkg::Options::=--%s"' \ - % (dpkg_options, dpkg_option) - return dpkg_options.strip() - -def expand_pkgspec_from_fnmatches(m, pkgspec, cache): - new_pkgspec = [] - for pkgname_or_fnmatch_pattern in pkgspec: - # note that any of these chars is not allowed in a (debian) pkgname - if [c for c in pkgname_or_fnmatch_pattern if c in "*?[]!"]: - if "=" in pkgname_or_fnmatch_pattern: - m.fail_json(msg="pkgname wildcard and version can not be mixed") - # handle multiarch pkgnames, the idea is that "apt*" should - # only select native packages. 
But "apt*:i386" should still work - if not ":" in pkgname_or_fnmatch_pattern: - matches = fnmatch.filter( - [pkg.name for pkg in cache - if not ":" in pkg.name], pkgname_or_fnmatch_pattern) - else: - matches = fnmatch.filter( - [pkg.name for pkg in cache], pkgname_or_fnmatch_pattern) - - if len(matches) == 0: - m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_or_fnmatch_pattern)) - else: - new_pkgspec.extend(matches) - else: - new_pkgspec.append(pkgname_or_fnmatch_pattern) - return new_pkgspec - -def install(m, pkgspec, cache, upgrade=False, default_release=None, - install_recommends=True, force=False, - dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): - packages = "" - pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) - for package in pkgspec: - name, version = package_split(package) - installed, upgradable, has_files = package_status(m, name, version, cache, state='install') - if not installed or (upgrade and upgradable): - packages += "'%s' " % package - - if len(packages) != 0: - if force: - force_yes = '--force-yes' - else: - force_yes = '' - - if m.check_mode: - check_arg = '--simulate' - else: - check_arg = '' - - for (k,v) in APT_ENV_VARS.iteritems(): - os.environ[k] = v - - cmd = "%s -y %s %s %s install %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages) - - if default_release: - cmd += " -t '%s'" % (default_release,) - if not install_recommends: - cmd += " --no-install-recommends" - - rc, out, err = m.run_command(cmd) - if rc: - return (False, dict(msg="'apt-get install %s' failed: %s" % (packages, err), stdout=out, stderr=err)) - else: - return (True, dict(changed=True, stdout=out, stderr=err)) - else: - return (True, dict(changed=False)) - -def install_deb(m, debs, cache, force, install_recommends, dpkg_options): - changed=False - deps_to_install = [] - pkgs_to_install = [] - for deb_file in debs.split(','): - pkg = apt.debfile.DebPackage(deb_file) - - # Check if it's already installed - if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME: - continue - # Check if package is installable - if not pkg.check(): - m.fail_json(msg=pkg._failure_string) - - # add any missing deps to the list of deps we need - # to install so they're all done in one shot - deps_to_install.extend(pkg.missing_deps) - - # and add this deb to the list of packages to install - pkgs_to_install.append(deb_file) - - # install the deps through apt - retvals = {} - if len(deps_to_install) > 0: - (success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache, - install_recommends=install_recommends, - dpkg_options=expand_dpkg_options(dpkg_options)) - if not success: - m.fail_json(**retvals) - changed = retvals.get('changed', False) - - if len(pkgs_to_install) > 0: - options = ' '.join(["--%s"% x for x in dpkg_options.split(",")]) - if m.check_mode: - options += " --simulate" - if force: - options += " --force-yes" - - cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install)) - rc, out, err = m.run_command(cmd) - if "stdout" in retvals: - stdout = retvals["stdout"] + out - else: - stdout = out - if "stderr" in retvals: - stderr = retvals["stderr"] + err - else: - stderr = err - - if rc == 0: - m.exit_json(changed=True, stdout=stdout, stderr=stderr) - else: - m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr) - else: - m.exit_json(changed=changed, stdout=retvals.get('stdout',''), stderr=retvals.get('stderr','')) - -def remove(m, pkgspec, cache, purge=False, - dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): - packages = "" - pkgspec = 
expand_pkgspec_from_fnmatches(m, pkgspec, cache) - for package in pkgspec: - name, version = package_split(package) - installed, upgradable, has_files = package_status(m, name, version, cache, state='remove') - if installed or (has_files and purge): - packages += "'%s' " % package - - if len(packages) == 0: - m.exit_json(changed=False) - else: - if purge: - purge = '--purge' - else: - purge = '' - - for (k,v) in APT_ENV_VARS.iteritems(): - os.environ[k] = v - - cmd = "%s -q -y %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, packages) - - if m.check_mode: - m.exit_json(changed=True) - - rc, out, err = m.run_command(cmd) - if rc: - m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err) - m.exit_json(changed=True, stdout=out, stderr=err) - -def upgrade(m, mode="yes", force=False, default_release=None, - dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): - if m.check_mode: - check_arg = '--simulate' - else: - check_arg = '' - - apt_cmd = None - if mode == "dist": - # apt-get dist-upgrade - apt_cmd = APT_GET_CMD - upgrade_command = "dist-upgrade" - elif mode == "full": - # aptitude full-upgrade - apt_cmd = APTITUDE_CMD - upgrade_command = "full-upgrade" - else: - # aptitude safe-upgrade # mode=yes # default - apt_cmd = APTITUDE_CMD - upgrade_command = "safe-upgrade" - - if force: - if apt_cmd == APT_GET_CMD: - force_yes = '--force-yes' - else: - force_yes = '' - else: - force_yes = '' - - apt_cmd_path = m.get_bin_path(apt_cmd, required=True) - - for (k,v) in APT_ENV_VARS.iteritems(): - os.environ[k] = v - - cmd = '%s -y %s %s %s %s' % (apt_cmd_path, dpkg_options, - force_yes, check_arg, upgrade_command) - - if default_release: - cmd += " -t '%s'" % (default_release,) - - rc, out, err = m.run_command(cmd) - if rc: - m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out) - if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out): - m.exit_json(changed=False, msg=out, stdout=out, stderr=err) - m.exit_json(changed=True, msg=out, stdout=out, stderr=err) - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(default='installed', choices=['installed', 'latest', 'removed', 'absent', 'present']), - update_cache = dict(default=False, aliases=['update-cache'], type='bool'), - cache_valid_time = dict(type='int'), - purge = dict(default=False, type='bool'), - package = dict(default=None, aliases=['pkg', 'name'], type='list'), - deb = dict(default=None), - default_release = dict(default=None, aliases=['default-release']), - install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'), - force = dict(default='no', type='bool'), - upgrade = dict(choices=['yes', 'safe', 'full', 'dist']), - dpkg_options = dict(default=DPKG_OPTIONS) - ), - mutually_exclusive = [['package', 'upgrade', 'deb']], - required_one_of = [['package', 'upgrade', 'update_cache', 'deb']], - supports_check_mode = True - ) - - if not HAS_PYTHON_APT: - try: - module.run_command('apt-get update && apt-get install python-apt -y -q', use_unsafe_shell=True, check_rc=True) - global apt, apt_pkg - import apt - import apt_pkg - except ImportError: - module.fail_json(msg="Could not import python modules: apt, apt_pkg. 
Please install python-apt package.") - - global APTITUDE_CMD - APTITUDE_CMD = module.get_bin_path("aptitude", False) - global APT_GET_CMD - APT_GET_CMD = module.get_bin_path("apt-get") - - p = module.params - if not APTITUDE_CMD and p.get('upgrade', None) in [ 'full', 'safe', 'yes' ]: - module.fail_json(msg="Could not find aptitude. Please ensure it is installed.") - - install_recommends = p['install_recommends'] - dpkg_options = expand_dpkg_options(p['dpkg_options']) - - try: - cache = apt.Cache() - if p['default_release']: - try: - apt_pkg.config['APT::Default-Release'] = p['default_release'] - except AttributeError: - apt_pkg.Config['APT::Default-Release'] = p['default_release'] - # reopen cache w/ modified config - cache.open(progress=None) - - if p['update_cache']: - # Default is: always update the cache - cache_valid = False - if p['cache_valid_time']: - tdelta = datetime.timedelta(seconds=p['cache_valid_time']) - try: - mtime = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime - except: - mtime = False - if mtime is False: - # Looks like the update-success-stamp is not available - # Fallback: Checking the mtime of the lists - try: - mtime = os.stat(APT_LISTS_PATH).st_mtime - except: - mtime = False - if mtime is False: - # No mtime could be read - looks like lists are not there - # We update the cache to be safe - cache_valid = False - else: - mtimestamp = datetime.datetime.fromtimestamp(mtime) - if mtimestamp + tdelta >= datetime.datetime.now(): - # dont update the cache - # the old cache is less than cache_valid_time seconds old - so still valid - cache_valid = True - - if cache_valid is not True: - cache.update() - cache.open(progress=None) - if not p['package'] and not p['upgrade'] and not p['deb']: - module.exit_json(changed=False) - - force_yes = p['force'] - - if p['upgrade']: - upgrade(module, p['upgrade'], force_yes, - p['default_release'], dpkg_options) - - if p['deb']: - if p['state'] != "installed": - module.fail_json(msg="deb only supports state=installed") - install_deb(module, p['deb'], cache, - install_recommends=install_recommends, - force=force_yes, dpkg_options=p['dpkg_options']) - - packages = p['package'] - latest = p['state'] == 'latest' - for package in packages: - if package.count('=') > 1: - module.fail_json(msg="invalid package spec: %s" % package) - if latest and '=' in package: - module.fail_json(msg='version number inconsistent with state=latest: %s' % package) - - if p['state'] == 'latest': - result = install(module, packages, cache, upgrade=True, - default_release=p['default_release'], - install_recommends=install_recommends, - force=force_yes, dpkg_options=dpkg_options) - (success, retvals) = result - if success: - module.exit_json(**retvals) - else: - module.fail_json(**retvals) - elif p['state'] in [ 'installed', 'present' ]: - result = install(module, packages, cache, default_release=p['default_release'], - install_recommends=install_recommends,force=force_yes, - dpkg_options=dpkg_options) - (success, retvals) = result - if success: - module.exit_json(**retvals) - else: - module.fail_json(**retvals) - elif p['state'] in [ 'removed', 'absent' ]: - remove(module, packages, cache, p['purge'], dpkg_options) - - except apt.cache.LockFailedException: - module.fail_json(msg="Failed to lock apt for exclusive operation") - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/apt_key b/library/packaging/apt_key deleted file mode 100644 index 0a483a97bbc..00000000000 --- a/library/packaging/apt_key +++ 
/dev/null @@ -1,277 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# (c) 2012, Jayson Vantuyl -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: apt_key -author: Jayson Vantuyl & others -version_added: "1.0" -short_description: Add or remove an apt key -description: - - Add or remove an I(apt) key, optionally downloading it -notes: - - doesn't download the key unless it really needs it - - as a sanity check, downloaded key id must match the one specified - - best practice is to specify the key id and the url -options: - id: - required: false - default: none - description: - - identifier of key - data: - required: false - default: none - description: - - keyfile contents - file: - required: false - default: none - description: - - keyfile path - keyring: - required: false - default: none - description: - - path to specific keyring file in /etc/apt/trusted.gpg.d - version_added: "1.3" - url: - required: false - default: none - description: - - url to retrieve key from. - keyserver: - version_added: "1.6" - required: false - default: none - description: - - keyserver to retrieve key from. - state: - required: false - choices: [ absent, present ] - default: present - description: - - used to specify if key is being added or revoked - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- required: false - default: 'yes' - choices: ['yes', 'no'] - -''' - -EXAMPLES = ''' -# Add an Apt signing key, uses whichever key is at the URL -- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present - -# Add an Apt signing key, will not download if present -- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present - -# Remove an Apt signing key, uses whichever key is at the URL -- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=absent - -# Remove a Apt specific signing key, leading 0x is valid -- apt_key: id=0x473041FA state=absent - -# Add a key from a file on the Ansible server -- apt_key: data="{{ lookup('file', 'apt.gpg') }}" state=present - -# Add an Apt signing key to a specific keyring file -- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc keyring=/etc/apt/trusted.gpg.d/debian.gpg state=present -''' - - -# FIXME: standardize into module_common -from traceback import format_exc -from re import compile as re_compile -# FIXME: standardize into module_common -from distutils.spawn import find_executable -from os import environ -from sys import exc_info -import traceback - -match_key = re_compile("^gpg:.*key ([0-9a-fA-F]+):.*$") - -REQUIRED_EXECUTABLES=['gpg', 'grep', 'apt-key'] - - -def check_missing_binaries(module): - missing = [e for e in REQUIRED_EXECUTABLES if not find_executable(e)] - if len(missing): - module.fail_json(msg="binaries are missing", names=missing) - -def all_keys(module, keyring, short_format): - if keyring: - cmd = "apt-key --keyring %s adv --list-public-keys --keyid-format=long" % keyring - else: - cmd = "apt-key adv --list-public-keys --keyid-format=long" - (rc, out, err) = module.run_command(cmd) - results = [] - lines = out.split('\n') - for line in lines: - if line.startswith("pub"): - tokens = line.split() - code = tokens[1] - (len_type, real_code) = code.split("/") - results.append(real_code) - if short_format: - results = shorten_key_ids(results) - return results - -def shorten_key_ids(key_id_list): - """ - Takes a list of key ids, and converts them to the 'short' format, - by reducing them to their last 8 characters. 
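- Used when the caller supplies a key id in the short (last 8 characters) format.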
- """ - short = [] - for key in key_id_list: - short.append(key[-8:]) - return short - -def download_key(module, url): - # FIXME: move get_url code to common, allow for in-memory D/L, support proxies - # and reuse here - if url is None: - module.fail_json(msg="needed a URL but was not specified") - - try: - rsp, info = fetch_url(module, url) - if info['status'] != 200: - module.fail_json(msg="Failed to download key at %s: %s" % (url, info['msg'])) - - return rsp.read() - except Exception: - module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc()) - -def import_key(module, keyserver, key_id): - cmd = "apt-key adv --keyserver %s --recv %s" % (keyserver, key_id) - (rc, out, err) = module.run_command(cmd, check_rc=True) - return True - -def add_key(module, keyfile, keyring, data=None): - if data is not None: - if keyring: - cmd = "apt-key --keyring %s add -" % keyring - else: - cmd = "apt-key add -" - (rc, out, err) = module.run_command(cmd, data=data, check_rc=True, binary_data=True) - else: - if keyring: - cmd = "apt-key --keyring %s add %s" % (keyring, keyfile) - else: - cmd = "apt-key add %s" % (keyfile) - (rc, out, err) = module.run_command(cmd, check_rc=True) - return True - -def remove_key(module, key_id, keyring): - # FIXME: use module.run_command, fail at point of error and don't discard useful stdin/stdout - if keyring: - cmd = 'apt-key --keyring %s del %s' % (keyring, key_id) - else: - cmd = 'apt-key del %s' % key_id - (rc, out, err) = module.run_command(cmd, check_rc=True) - return True - -def main(): - module = AnsibleModule( - argument_spec=dict( - id=dict(required=False, default=None), - url=dict(required=False), - data=dict(required=False), - file=dict(required=False), - key=dict(required=False), - keyring=dict(required=False), - validate_certs=dict(default='yes', type='bool'), - keyserver=dict(required=False), - state=dict(required=False, choices=['present', 'absent'], default='present') - ), - supports_check_mode=True - ) - - key_id = module.params['id'] - url = module.params['url'] - data = module.params['data'] - filename = module.params['file'] - keyring = module.params['keyring'] - state = module.params['state'] - keyserver = module.params['keyserver'] - changed = False - - if key_id: - try: - _ = int(key_id, 16) - if key_id.startswith('0x'): - key_id = key_id[2:] - key_id = key_id.upper() - except ValueError: - module.fail_json(msg="Invalid key_id", id=key_id) - - # FIXME: I think we have a common facility for this, if not, want - check_missing_binaries(module) - - short_format = (key_id is not None and len(key_id) == 8) - keys = all_keys(module, keyring, short_format) - return_values = {} - - if state == 'present': - if key_id and key_id in keys: - module.exit_json(changed=False) - else: - if not filename and not data and not keyserver: - data = download_key(module, url) - if key_id and key_id in keys: - module.exit_json(changed=False) - else: - if module.check_mode: - module.exit_json(changed=True) - if filename: - add_key(module, filename, keyring) - elif keyserver: - import_key(module, keyserver, key_id) - else: - add_key(module, "-", keyring, data) - changed=False - keys2 = all_keys(module, keyring, short_format) - if len(keys) != len(keys2): - changed=True - if key_id and not key_id[-16:] in keys2: - module.fail_json(msg="key does not seem to have been added", id=key_id) - module.exit_json(changed=changed) - elif state == 'absent': - if not key_id: - module.fail_json(msg="key is required") - if key_id in keys: - if module.check_mode: 
- module.exit_json(changed=True) - if remove_key(module, key_id, keyring): - changed=True - else: - # FIXME: module.fail_json or exit-json immediately at point of failure - module.fail_json(msg="error removing key_id", **return_values) - - module.exit_json(changed=changed, **return_values) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() diff --git a/library/packaging/apt_repository b/library/packaging/apt_repository deleted file mode 100644 index 2ee5819fc4e..00000000000 --- a/library/packaging/apt_repository +++ /dev/null @@ -1,446 +0,0 @@ -#!/usr/bin/python -# encoding: utf-8 - -# (c) 2012, Matt Wright -# (c) 2013, Alexander Saltanov -# (c) 2014, Rutger Spiertz -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -DOCUMENTATION = ''' ---- -module: apt_repository -short_description: Add and remove APT repositories -description: - - Add or remove an APT repositories in Ubuntu and Debian. -notes: - - This module works on Debian and Ubuntu and requires C(python-apt). - - This module supports Debian Squeeze (version 6) as well as its successors. - - This module treats Debian and Ubuntu distributions separately. So PPA could be installed only on Ubuntu machines. -options: - repo: - required: true - default: none - description: - - A source string for the repository. - state: - required: false - choices: [ "absent", "present" ] - default: "present" - description: - - A source string state. - mode: - required: false - default: 0644 - description: - - The octal mode for newly created files in sources.list.d - version_added: "1.6" - update_cache: - description: - - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes. - required: false - default: "yes" - choices: [ "yes", "no" ] - validate_certs: - version_added: '1.8' - description: - - If C(no), SSL certificates for the target repo will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] -author: Alexander Saltanov -version_added: "0.7" -requirements: [ python-apt ] -''' - -EXAMPLES = ''' -# Add specified repository into sources list. -apt_repository: repo='deb http://archive.canonical.com/ubuntu hardy partner' state=present - -# Add source repository into sources list. -apt_repository: repo='deb-src http://archive.canonical.com/ubuntu hardy partner' state=present - -# Remove specified repository from sources list. -apt_repository: repo='deb http://archive.canonical.com/ubuntu hardy partner' state=absent - -# On Ubuntu target: add nginx stable repository from PPA and install its signing key. -# On Debian target: adding PPA is not available, so it will fail immediately. 
-apt_repository: repo='ppa:nginx/stable' -''' - -import glob -import os -import re -import tempfile - -try: - import apt - import apt_pkg - import aptsources.distro as aptsources_distro - distro = aptsources_distro.get_distro() - HAVE_PYTHON_APT = True -except ImportError: - distro = None - HAVE_PYTHON_APT = False - - -VALID_SOURCE_TYPES = ('deb', 'deb-src') - -def install_python_apt(module): - - if not module.check_mode: - apt_get_path = module.get_bin_path('apt-get') - if apt_get_path: - rc, so, se = module.run_command('%s update && %s install python-apt -y -q' % (apt_get_path, apt_get_path), use_unsafe_shell=True) - if rc == 0: - global apt, apt_pkg, aptsources_distro, distro, HAVE_PYTHON_APT - import apt - import apt_pkg - import aptsources.distro as aptsources_distro - distro = aptsources_distro.get_distro() - HAVE_PYTHON_APT = True - else: - module.fail_json(msg="Failed to auto-install python-apt. Error was: '%s'" % se.strip()) - -class InvalidSource(Exception): - pass - - -# Simple version of aptsources.sourceslist.SourcesList. -# No advanced logic and no backups inside. -class SourcesList(object): - def __init__(self): - self.files = {} # group sources by file - self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist') - - # read sources.list if it exists - if os.path.isfile(self.default_file): - self.load(self.default_file) - - # read sources.list.d - for file in glob.iglob('%s/*.list' % self._apt_cfg_dir('Dir::Etc::sourceparts')): - self.load(file) - - def __iter__(self): - '''Simple iterator to go over all sources. Empty, non-source, and other not valid lines will be skipped.''' - for file, sources in self.files.items(): - for n, valid, enabled, source, comment in sources: - if valid: - yield file, n, enabled, source, comment - raise StopIteration - - def _expand_path(self, filename): - if '/' in filename: - return filename - else: - return os.path.abspath(os.path.join(self._apt_cfg_dir('Dir::Etc::sourceparts'), filename)) - - def _suggest_filename(self, line): - def _cleanup_filename(s): - return '_'.join(re.sub('[^a-zA-Z0-9]', ' ', s).split()) - def _strip_username_password(s): - if '@' in s: - s = s.split('@', 1) - s = s[-1] - return s - - # Drop options and protocols. - line = re.sub('\[[^\]]+\]', '', line) - line = re.sub('\w+://', '', line) - - # split line into valid keywords - parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES] - - # Drop usernames and passwords - parts[0] = _strip_username_password(parts[0]) - - return '%s.list' % _cleanup_filename(' '.join(parts[:1])) - - def _parse(self, line, raise_if_invalid_or_disabled=False): - valid = False - enabled = True - source = '' - comment = '' - - line = line.strip() - if line.startswith('#'): - enabled = False - line = line[1:] - - # Check for another "#" in the line and treat a part after it as a comment. - i = line.find('#') - if i > 0: - comment = line[i+1:].strip() - line = line[:i] - - # Split a source into substring to make sure that it is source spec. - # Duplicated whitespaces in a valid source spec will be removed. 
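- # (_parse() reports each line as a (valid, enabled, source, comment) tuple)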
- source = line.strip() - if source: - chunks = source.split() - if chunks[0] in VALID_SOURCE_TYPES: - valid = True - source = ' '.join(chunks) - - if raise_if_invalid_or_disabled and (not valid or not enabled): - raise InvalidSource(line) - - return valid, enabled, source, comment - - @staticmethod - def _apt_cfg_file(filespec): - ''' - Wrapper for `apt_pkg` module for running with Python 2.5 - ''' - try: - result = apt_pkg.config.find_file(filespec) - except AttributeError: - result = apt_pkg.Config.FindFile(filespec) - return result - - @staticmethod - def _apt_cfg_dir(dirspec): - ''' - Wrapper for `apt_pkg` module for running with Python 2.5 - ''' - try: - result = apt_pkg.config.find_dir(dirspec) - except AttributeError: - result = apt_pkg.Config.FindDir(dirspec) - return result - - def load(self, file): - group = [] - f = open(file, 'r') - for n, line in enumerate(f): - valid, enabled, source, comment = self._parse(line) - group.append((n, valid, enabled, source, comment)) - self.files[file] = group - - def save(self, module): - for filename, sources in self.files.items(): - if sources: - d, fn = os.path.split(filename) - fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d) - - # allow the user to override the default mode - this_mode = module.params['mode'] - module.set_mode_if_different(tmp_path, this_mode, False) - - f = os.fdopen(fd, 'w') - for n, valid, enabled, source, comment in sources: - chunks = [] - if not enabled: - chunks.append('# ') - chunks.append(source) - if comment: - chunks.append(' # ') - chunks.append(comment) - chunks.append('\n') - line = ''.join(chunks) - - try: - f.write(line) - except IOError, err: - module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, unicode(err))) - module.atomic_move(tmp_path, filename) - else: - del self.files[filename] - if os.path.exists(filename): - os.remove(filename) - - def dump(self): - return '\n'.join([str(i) for i in self]) - - def modify(self, file, n, enabled=None, source=None, comment=None): - ''' - This function to be used with iterator, so we don't care of invalid sources. - If source, enabled, or comment is None, original value from line ``n`` will be preserved. - ''' - valid, enabled_old, source_old, comment_old = self.files[file][n][1:] - choice = lambda new, old: old if new is None else new - self.files[file][n] = (n, valid, choice(enabled, enabled_old), choice(source, source_old), choice(comment, comment_old)) - - def _add_valid_source(self, source_new, comment_new, file): - # We'll try to reuse disabled source if we have it. - # If we have more than one entry, we will enable them all - no advanced logic, remember. - found = False - for filename, n, enabled, source, comment in self: - if source == source_new: - self.modify(filename, n, enabled=True) - found = True - - if not found: - if file is None: - file = self.default_file - else: - file = self._expand_path(file) - - if file not in self.files: - self.files[file] = [] - - files = self.files[file] - files.append((len(files), True, True, source_new, comment_new)) - - def add_source(self, line, comment='', file=None): - source = self._parse(line, raise_if_invalid_or_disabled=True)[2] - - # Prefer separate files for new sources. - self._add_valid_source(source, comment, file=file or self._suggest_filename(source)) - - def _remove_valid_source(self, source): - # If we have more than one entry, we will remove them all (not comment, remove!) 
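- # enabled entries whose source matches are popped from their file's list by line index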
- for filename, n, enabled, src, comment in self: - if source == src and enabled: - self.files[filename].pop(n) - - def remove_source(self, line): - source = self._parse(line, raise_if_invalid_or_disabled=True)[2] - self._remove_valid_source(source) - - -class UbuntuSourcesList(SourcesList): - - LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s' - - def __init__(self, module, add_ppa_signing_keys_callback=None): - self.module = module - self.add_ppa_signing_keys_callback = add_ppa_signing_keys_callback - super(UbuntuSourcesList, self).__init__() - - def _get_ppa_info(self, owner_name, ppa_name): - lp_api = self.LP_API % (owner_name, ppa_name) - - headers = dict(Accept='application/json') - response, info = fetch_url(self.module, lp_api, headers=headers) - if info['status'] != 200: - self.module.fail_json(msg="failed to fetch PPA information, error was: %s" % info['msg']) - return json.load(response) - - def _expand_ppa(self, path): - ppa = path.split(':')[1] - ppa_owner = ppa.split('/')[0] - try: - ppa_name = ppa.split('/')[1] - except IndexError: - ppa_name = 'ppa' - - line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, distro.codename) - return line, ppa_owner, ppa_name - - def _key_already_exists(self, key_fingerprint): - rc, out, err = self.module.run_command('apt-key export %s' % key_fingerprint, check_rc=True) - return len(err) == 0 - - def add_source(self, line, comment='', file=None): - if line.startswith('ppa:'): - source, ppa_owner, ppa_name = self._expand_ppa(line) - - if self.add_ppa_signing_keys_callback is not None: - info = self._get_ppa_info(ppa_owner, ppa_name) - if not self._key_already_exists(info['signing_key_fingerprint']): - command = ['apt-key', 'adv', '--recv-keys', '--keyserver', 'hkp://keyserver.ubuntu.com:80', info['signing_key_fingerprint']] - self.add_ppa_signing_keys_callback(command) - - file = file or self._suggest_filename('%s_%s' % (line, distro.codename)) - else: - source = self._parse(line, raise_if_invalid_or_disabled=True)[2] - file = file or self._suggest_filename(source) - self._add_valid_source(source, comment, file) - - def remove_source(self, line): - if line.startswith('ppa:'): - source = self._expand_ppa(line)[0] - else: - source = self._parse(line, raise_if_invalid_or_disabled=True)[2] - self._remove_valid_source(source) - - -def get_add_ppa_signing_key_callback(module): - def _run_command(command): - module.run_command(command, check_rc=True) - - if module.check_mode: - return None - else: - return _run_command - - -def main(): - module = AnsibleModule( - argument_spec=dict( - repo=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - mode=dict(required=False, default=0644), - update_cache = dict(aliases=['update-cache'], type='bool', default='yes'), - # this should not be needed, but exists as a failsafe - install_python_apt=dict(required=False, default="yes", type='bool'), - validate_certs = dict(default='yes', type='bool'), - ), - supports_check_mode=True, - ) - - params = module.params - if params['install_python_apt'] and not HAVE_PYTHON_APT and not module.check_mode: - install_python_apt(module) - - repo = module.params['repo'] - state = module.params['state'] - update_cache = module.params['update_cache'] - sourceslist = None - - if HAVE_PYTHON_APT: - if isinstance(distro, aptsources_distro.UbuntuDistribution): - sourceslist = UbuntuSourcesList(module, - add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module)) - elif HAVE_PYTHON_APT and \ - 
isinstance(distro, aptsources_distro.DebianDistribution) or isinstance(distro, aptsources_distro.Distribution): - sourceslist = SourcesList() - else: - module.fail_json(msg='Module apt_repository supports only Debian and Ubuntu. ' + \ - 'You may be seeing this because python-apt is not installed, but you requested that it not be auto-installed') - - sources_before = sourceslist.dump() - - try: - if state == 'present': - sourceslist.add_source(repo) - elif state == 'absent': - sourceslist.remove_source(repo) - except InvalidSource, err: - module.fail_json(msg='Invalid repository string: %s' % unicode(err)) - - sources_after = sourceslist.dump() - changed = sources_before != sources_after - - if not module.check_mode and changed: - try: - sourceslist.save(module) - if update_cache: - cache = apt.Cache() - cache.update() - except OSError, err: - module.fail_json(msg=unicode(err)) - - module.exit_json(changed=changed, repo=repo, state=state) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/packaging/apt_rpm b/library/packaging/apt_rpm deleted file mode 100755 index a85c528a239..00000000000 --- a/library/packaging/apt_rpm +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -tt -# -*- coding: utf-8 -*- - -# (c) 2013, Evgenii Terechkov -# Written by Evgenii Terechkov -# Based on urpmi module written by Philippe Makowski -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - - -DOCUMENTATION = ''' ---- -module: apt_rpm -short_description: apt_rpm package manager -description: - - Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries required. -version_added: "1.5" -options: - pkg: - description: - - name of package to install, upgrade or remove. - required: true - default: null - state: - description: - - Indicates the desired package state - required: false - default: present - choices: [ "absent", "present" ] - update_cache: - description: - - update the package database first C(apt-get update). 
- required: false - default: no - choices: [ "yes", "no" ] -author: Evgenii Terechkov -notes: [] -''' - -EXAMPLES = ''' -# install package foo -- apt_rpm: pkg=foo state=present -# remove package foo -- apt_rpm: pkg=foo state=absent -# description: remove packages foo and bar -- apt_rpm: pkg=foo,bar state=absent -# description: update the package database and install bar (bar will be updated if a newer version exists) -- apt_rpm: name=bar state=present update_cache=yes -''' - - -try: - import json -except ImportError: - import simplejson as json - -import shlex -import os -import sys - -APT_PATH="/usr/bin/apt-get" -RPM_PATH="/usr/bin/rpm" - -def query_package(module, name): - # rpm -q returns 0 if the package is installed, - # 1 if it is not installed - rc = os.system("%s -q %s" % (RPM_PATH,name)) - if rc == 0: - return True - else: - return False - -def query_package_provides(module, name): - # rpm -q returns 0 if the package is installed, - # 1 if it is not installed - rc = os.system("%s -q --provides %s >/dev/null" % (RPM_PATH,name)) - return rc == 0 - -def update_package_db(module): - rc = os.system("%s update" % APT_PATH) - - if rc != 0: - module.fail_json(msg="could not update package db") - -def remove_packages(module, packages): - - remove_c = 0 - # Using a for loop in case of error, so we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, package): - continue - - rc = os.system("%s -y remove %s > /dev/null" % (APT_PATH,package)) - - if rc != 0: - module.fail_json(msg="failed to remove %s" % (package)) - - remove_c += 1 - - if remove_c > 0: - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, pkgspec): - - packages = "" - for package in pkgspec: - if not query_package_provides(module, package): - packages += "'%s' " % package - - if len(packages) != 0: - - cmd = ("%s -y install %s > /dev/null" % (APT_PATH, packages)) - - rc, out, err = module.run_command(cmd) - - # verify that every requested package is now provided - installed = True - for package in pkgspec: - if not query_package_provides(module, package): - installed = False - - # apt-rpm always has a 0 exit code if --force is used - if rc or not installed: - module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err)) - else: - module.exit_json(changed=True, msg="installed %s" % packages) - else: - module.exit_json(changed=False) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']), - update_cache = dict(default=False, aliases=['update-cache'], type='bool'), - package = dict(aliases=['pkg', 'name'], required=True))) - - - if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH): - module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm") - - p = module.params - - if p['update_cache']: - update_package_db(module) - - packages = p['package'].split(',') - - if p['state'] in [ 'installed', 'present' ]: - install_packages(module, packages) - - elif p['state'] in [ 'removed', 'absent' ]: - remove_packages(module, packages) - -# this is magic, see lib/ansible/module_common.py -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/composer b/library/packaging/composer deleted file mode 100644 index 2930018bd9f..00000000000 --- a/library/packaging/composer +++ /dev/null @@ -1,164 +0,0 @@
-#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Dimitrios Tydeas Mengidis - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. -# - -DOCUMENTATION = ''' ---- -module: composer -author: Dimitrios Tydeas Mengidis -short_description: Dependency Manager for PHP -version_added: "1.6" -description: - - Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs and it will install them in your project for you. -options: - command: - version_added: "1.8" - description: - - Composer command like "install", "update" and so on - required: false - default: install - working_dir: - description: - - Directory of your project ( see --working-dir ) - required: true - default: null - aliases: [ "working-dir" ] - prefer_source: - description: - - Forces installation from package sources when possible ( see --prefer-source ) - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [ "prefer-source" ] - prefer_dist: - description: - - Forces installation from package dist even for dev versions ( see --prefer-dist ) - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [ "prefer-dist" ] - no_dev: - description: - - Disables installation of require-dev packages ( see --no-dev ) - required: false - default: "yes" - choices: [ "yes", "no" ] - aliases: [ "no-dev" ] - no_scripts: - description: - - Skips the execution of all scripts defined in composer.json ( see --no-scripts ) - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [ "no-scripts" ] - no_plugins: - description: - - Disables all plugins ( see --no-plugins ) - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [ "no-plugins" ] - optimize_autoloader: - description: - - Optimize autoloader during autoloader dump ( see --optimize-autoloader ). Convert PSR-0/4 autoloading to classmap to get a faster autoloader. This is recommended especially for production, but can take a bit of time to run so it is currently not done by default.
- required: false - default: "yes" - choices: [ "yes", "no" ] - aliases: [ "optimize-autoloader" ] -requirements: - - php - - composer installed in bin path (recommended /usr/local/bin) -notes: - - Default options that are always appended in each execution are --no-ansi, --no-progress, and --no-interaction -''' - -EXAMPLES = ''' -# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock -- composer: command=install working_dir=/path/to/project -''' - -import os -import re - -def parse_out(string): - return re.sub("\s+", " ", string).strip() - -def has_changed(string): - return (re.match("Nothing to install or update", string) != None) - -def composer_install(module, command, options): - php_path = module.get_bin_path("php", True, ["/usr/local/bin"]) - composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"]) - cmd = "%s %s %s %s" % (php_path, composer_path, command, " ".join(options)) - - return module.run_command(cmd) - -def main(): - module = AnsibleModule( - argument_spec = dict( - command = dict(default="install", type="str", required=False), - working_dir = dict(aliases=["working-dir"], required=True), - prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]), - prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]), - no_dev = dict(default="yes", type="bool", aliases=["no-dev"]), - no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]), - no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]), - optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]), - ), - supports_check_mode=True - ) - - module.params["working_dir"] = os.path.abspath(module.params["working_dir"]) - - options = set([]) - # Default options - options.add("--no-ansi") - options.add("--no-progress") - options.add("--no-interaction") - - if module.check_mode: - options.add("--dry-run") - - # Get composer command with fallback to default - command = module.params['command'] - del module.params['command']; - - # Prepare options - for i in module.params: - opt = "--%s" % i.replace("_","-") - p = module.params[i] - if isinstance(p, (bool)) and p: - options.add(opt) - elif isinstance(p, (str)): - options.add("%s=%s" % (opt, p)) - - rc, out, err = composer_install(module, command, options) - - if rc != 0: - output = parse_out(err) - module.fail_json(msg=output) - else: - output = parse_out(out) - module.exit_json(changed=has_changed(output), msg=output) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/cpanm b/library/packaging/cpanm deleted file mode 100644 index 5b1a9878d21..00000000000 --- a/library/packaging/cpanm +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Franck Cuny -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# - -DOCUMENTATION = ''' ---- -module: cpanm -short_description: Manages Perl library dependencies. -description: - - Manage Perl library dependencies. -version_added: "1.6" -options: - name: - description: - - The name of the Perl library to install - required: false - default: null - aliases: ["pkg"] - from_path: - description: - - The local directory from where to install - required: false - default: null - notest: - description: - - Do not run unit tests - required: false - default: false - locallib: - description: - - Specify the install base to install modules - required: false - default: false - mirror: - description: - - Specifies the base URL for the CPAN mirror to use - required: false - default: false -examples: - - code: "cpanm: name=Dancer" - description: Install I(Dancer) perl package. - - code: "cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib" - description: "Install I(Dancer) (U(http://perldancer.org/)) into the specified I(locallib)" - - code: "cpanm: from_path=/srv/webapps/my_app/src/" - description: Install perl dependencies from local directory. - - code: "cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib" - description: Install I(Dancer) perl package without running the unit tests in indicated I(locallib). - - code: "cpanm: name=Dancer mirror=http://cpan.cpantesters.org/" - description: Install I(Dancer) perl package from a specific mirror -notes: - - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. -author: Franck Cuny -''' - -def _is_package_installed(module, name, locallib, cpanm): - cmd = "" - if locallib: - os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib - cmd = "%s perl -M%s -e '1'" % (cmd, name) - res, stdout, stderr = module.run_command(cmd, check_rc=False) - if res == 0: - return True - else: - return False - -def _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm): - # this code should use "%s" like everything else and just return early but not fixing all of it now. 
- # don't copy stuff like this - if from_path: - cmd = "{cpanm} {path}".format(cpanm=cpanm, path=from_path) - else: - cmd = "{cpanm} {name}".format(cpanm=cpanm, name=name) - - if notest is True: - cmd = "{cmd} -n".format(cmd=cmd) - - if locallib is not None: - cmd = "{cmd} -l {locallib}".format(cmd=cmd, locallib=locallib) - - if mirror is not None: - cmd = "{cmd} --mirror {mirror}".format(cmd=cmd, mirror=mirror) - - return cmd - - -def main(): - arg_spec = dict( - name=dict(default=None, required=False, aliases=['pkg']), - from_path=dict(default=None, required=False), - notest=dict(default=False, type='bool'), - locallib=dict(default=None, required=False), - mirror=dict(default=None, required=False) - ) - - module = AnsibleModule( - argument_spec=arg_spec, - required_one_of=[['name', 'from_path']], - ) - - cpanm = module.get_bin_path('cpanm', True) - name = module.params['name'] - from_path = module.params['from_path'] - notest = module.boolean(module.params.get('notest', False)) - locallib = module.params['locallib'] - mirror = module.params['mirror'] - - changed = False - - installed = _is_package_installed(module, name, locallib, cpanm) - - if not installed: - out_cpanm = err_cpanm = '' - cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm) - - rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False) - - if rc_cpanm != 0: - module.fail_json(msg=err_cpanm, cmd=cmd) - - if err_cpanm and 'is up to date' not in err_cpanm: - changed = True - - module.exit_json(changed=changed, binary=cpanm, name=name) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/easy_install b/library/packaging/easy_install deleted file mode 100644 index 889a81f025a..00000000000 --- a/library/packaging/easy_install +++ /dev/null @@ -1,188 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Matt Wright -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. -# - -import tempfile -import os.path - -DOCUMENTATION = ''' ---- -module: easy_install -short_description: Installs Python libraries -description: - - Installs Python libraries, optionally in a I(virtualenv) -version_added: "0.7" -options: - name: - description: - - A Python library name - required: true - default: null - aliases: [] - virtualenv: - description: - - an optional I(virtualenv) directory path to install into. If the - I(virtualenv) does not exist, it is created automatically - required: false - default: null - virtualenv_site_packages: - version_added: "1.1" - description: - - Whether the virtual environment will inherit packages from the - global site-packages directory. Note that if this setting is - changed on an already existing virtual environment it will not - have any effect; the environment must be deleted and newly - created.
- required: false - default: "no" - choices: [ "yes", "no" ] - virtualenv_command: - version_added: "1.1" - description: - - The command to create the virtual environment with. For example - C(pyvenv), C(virtualenv), C(virtualenv2). - required: false - default: virtualenv - executable: - description: - - The explicit executable or a pathname to the executable to be used to - run easy_install for a specific version of Python installed in the - system. For example C(easy_install-3.3), if there are both Python 2.7 - and 3.3 installations in the system and you want to run easy_install - for the Python 3.3 installation. - version_added: "1.3" - required: false - default: null -notes: - - Please note that the M(easy_install) module can only install Python - libraries. Thus this module is not able to remove libraries. It is - generally recommended to use the M(pip) module which you can first install - using M(easy_install). - - Also note that I(virtualenv) must be installed on the remote host if the - C(virtualenv) parameter is specified. -requirements: [ "virtualenv" ] -author: Matt Wright -''' - -EXAMPLES = ''' -# Examples from Ansible Playbooks -- easy_install: name=pip - -# Install Bottle into the specified virtualenv. -- easy_install: name=bottle virtualenv=/webapps/myapp/venv -''' - -def _is_package_installed(module, name, easy_install): - cmd = '%s --dry-run %s' % (easy_install, name) - rc, status_stdout, status_stderr = module.run_command(cmd) - return not ('Reading' in status_stdout or 'Downloading' in status_stdout) - - -def _get_easy_install(module, env=None, executable=None): - candidate_easy_inst_basenames = ['easy_install'] - easy_install = None - if executable is not None: - if os.path.isabs(executable): - easy_install = executable - else: - candidate_easy_inst_basenames.insert(0, executable) - if easy_install is None: - if env is None: - opt_dirs = [] - else: - # Try easy_install with the virtualenv directory first. - opt_dirs = ['%s/bin' % env] - for basename in candidate_easy_inst_basenames: - easy_install = module.get_bin_path(basename, False, opt_dirs) - if easy_install is not None: - break - # easy_install should have been found by now. The final call to - # get_bin_path will trigger fail_json. 
- if easy_install is None: - basename = candidate_easy_inst_basenames[0] - easy_install = module.get_bin_path(basename, True, opt_dirs) - return easy_install - - -def main(): - arg_spec = dict( - name=dict(required=True), - virtualenv=dict(default=None, required=False), - virtualenv_site_packages=dict(default='no', type='bool'), - virtualenv_command=dict(default='virtualenv', required=False), - executable=dict(default='easy_install', required=False), - ) - - module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) - - name = module.params['name'] - env = module.params['virtualenv'] - executable = module.params['executable'] - site_packages = module.params['virtualenv_site_packages'] - virtualenv_command = module.params['virtualenv_command'] - - rc = 0 - err = '' - out = '' - - if env: - virtualenv = module.get_bin_path(virtualenv_command, True) - - if not os.path.exists(os.path.join(env, 'bin', 'activate')): - if module.check_mode: - module.exit_json(changed=True) - command = '%s %s' % (virtualenv, env) - if site_packages: - command += ' --system-site-packages' - cwd = tempfile.gettempdir() - rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd) - - rc += rc_venv - out += out_venv - err += err_venv - - easy_install = _get_easy_install(module, env, executable) - - cmd = None - changed = False - installed = _is_package_installed(module, name, easy_install) - - if not installed: - if module.check_mode: - module.exit_json(changed=True) - cmd = '%s %s' % (easy_install, name) - rc_easy_inst, out_easy_inst, err_easy_inst = module.run_command(cmd) - - rc += rc_easy_inst - out += out_easy_inst - err += err_easy_inst - - changed = True - - if rc != 0: - module.fail_json(msg=err, cmd=cmd) - - module.exit_json(changed=changed, binary=easy_install, - name=name, virtualenv=env) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/gem b/library/packaging/gem deleted file mode 100644 index 3740a3e7ce3..00000000000 --- a/library/packaging/gem +++ /dev/null @@ -1,238 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Johan Wiren -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: gem -short_description: Manage Ruby gems -description: - - Manage installation and uninstallation of Ruby gems. -version_added: "1.1" -options: - name: - description: - - The name of the gem to be managed. - required: true - state: - description: - - The desired state of the gem. C(latest) ensures that the latest version is installed. - required: false - choices: [present, absent, latest] - default: present - gem_source: - description: - - The path to a local gem used as installation source. - required: false - include_dependencies: - description: - - Whether to include dependencies or not. 
- required: false - choices: [ "yes", "no" ] - default: "yes" - repository: - description: - - The repository from which the gem will be installed - required: false - aliases: [source] - user_install: - description: - - Install gem in user's local gems cache or for all users - required: false - default: "yes" - version_added: "1.3" - executable: - description: - - Override the path to the gem executable - required: false - version_added: "1.4" - version: - description: - - Version of the gem to be installed/removed. - required: false - pre_release: - description: - - Allow installation of pre-release versions of the gem. - required: false - default: "no" - version_added: "1.6" -author: Johan Wiren -''' - -EXAMPLES = ''' -# Installs version 1.0 of vagrant. -- gem: name=vagrant version=1.0 state=present - -# Installs latest available version of rake. -- gem: name=rake state=latest - -# Installs rake version 1.0 from a local gem on disk. -- gem: name=rake gem_source=/path/to/gems/rake-1.0.gem state=present -''' - -import re - -def get_rubygems_path(module): - if module.params['executable']: - return module.params['executable'].split(' ') - else: - return [ module.get_bin_path('gem', True) ] - -def get_rubygems_version(module): - cmd = get_rubygems_path(module) + [ '--version' ] - (rc, out, err) = module.run_command(cmd, check_rc=True) - - match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out) - if not match: - return None - - return tuple(int(x) for x in match.groups()) - -def get_installed_versions(module, remote=False): - - cmd = get_rubygems_path(module) - cmd.append('query') - if remote: - cmd.append('--remote') - if module.params['repository']: - cmd.extend([ '--source', module.params['repository'] ]) - cmd.append('-n') - cmd.append('^%s$' % module.params['name']) - (rc, out, err) = module.run_command(cmd, check_rc=True) - installed_versions = [] - for line in out.splitlines(): - match = re.match(r"\S+\s+\((.+)\)", line) - if match: - versions = match.group(1) - for version in versions.split(', '): - installed_versions.append(version.split()[0]) - return installed_versions - -def exists(module): - - if module.params['state'] == 'latest': - remoteversions = get_installed_versions(module, remote=True) - if remoteversions: - module.params['version'] = remoteversions[0] - installed_versions = get_installed_versions(module) - if module.params['version']: - if module.params['version'] in installed_versions: - return True - else: - if installed_versions: - return True - return False - -def uninstall(module): - - if module.check_mode: - return - cmd = get_rubygems_path(module) - cmd.append('uninstall') - if module.params['version']: - cmd.extend([ '--version', module.params['version'] ]) - else: - cmd.append('--all') - cmd.append('--executable') - cmd.append(module.params['name']) - module.run_command(cmd, check_rc=True) - -def install(module): - - if module.check_mode: - return - - ver = get_rubygems_version(module) - if ver: - major = ver[0] - else: - major = None - - cmd = get_rubygems_path(module) - cmd.append('install') - if module.params['version']: - cmd.extend([ '--version', module.params['version'] ]) - if module.params['repository']: - cmd.extend([ '--source', module.params['repository'] ]) - if not module.params['include_dependencies']: - cmd.append('--ignore-dependencies') - else: - if major and major < 2: - cmd.append('--include-dependencies') - if module.params['user_install']: - cmd.append('--user-install') - else: - cmd.append('--no-user-install') - if module.params['pre_release']: - 
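For reference, the get_installed_versions() parsing shown above can be exercised in isolation. A minimal sketch, with an assumed sample of `gem query` output (the real module shells out to the gem binary):

import re

def parse_versions(out):
    versions = []
    for line in out.splitlines():
        # Lines look like: name (ver1, ver2); keep only version numbers.
        match = re.match(r"\S+\s+\((.+)\)", line)
        if match:
            for version in match.group(1).split(', '):
                versions.append(version.split()[0])
    return versions

print(parse_versions('rake (10.3.2, 0.9.6)\n'))  # ['10.3.2', '0.9.6']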
cmd.append('--pre') - cmd.append('--no-rdoc') - cmd.append('--no-ri') - cmd.append(module.params['gem_source']) - module.run_command(cmd, check_rc=True) - -def main(): - - module = AnsibleModule( - argument_spec = dict( - executable = dict(required=False, type='str'), - gem_source = dict(required=False, type='str'), - include_dependencies = dict(required=False, default=True, type='bool'), - name = dict(required=True, type='str'), - repository = dict(required=False, aliases=['source'], type='str'), - state = dict(required=False, default='present', choices=['present','absent','latest'], type='str'), - user_install = dict(required=False, default=True, type='bool'), - pre_release = dict(required=False, default=False, type='bool'), - version = dict(required=False, type='str'), - ), - supports_check_mode = True, - mutually_exclusive = [ ['gem_source','repository'], ['gem_source','version'] ], - ) - - if module.params['version'] and module.params['state'] == 'latest': - module.fail_json(msg="Cannot specify version when state=latest") - if module.params['gem_source'] and module.params['state'] == 'latest': - module.fail_json(msg="Cannot maintain state=latest when installing from local source") - - if not module.params['gem_source']: - module.params['gem_source'] = module.params['name'] - - changed = False - - if module.params['state'] in [ 'present', 'latest']: - if not exists(module): - install(module) - changed = True - elif module.params['state'] == 'absent': - if exists(module): - uninstall(module) - changed = True - - result = {} - result['name'] = module.params['name'] - result['state'] = module.params['state'] - if module.params['version']: - result['version'] = module.params['version'] - result['changed'] = changed - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/homebrew b/library/packaging/homebrew deleted file mode 100644 index 2ecac0c4ace..00000000000 --- a/library/packaging/homebrew +++ /dev/null @@ -1,835 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Andrew Dunham -# (c) 2013, Daniel Jaouen -# -# Based on macports (Jimmy Tang ) -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
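The install() logic of the gem module above only adds --include-dependencies when the major RubyGems version is below 2, based on the tuple parsed from `gem --version`. A small sketch under that assumption; the sample output string is hypothetical:

import re

def parse_gem_version(out):
    # Matches output such as '2.0.14\n' from `gem --version`.
    match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out)
    return tuple(int(x) for x in match.groups()) if match else None

ver = parse_gem_version('2.0.14\n')
major = ver[0] if ver else None
# --include-dependencies is only needed (and only valid) for RubyGems 1.x.
flags = ['--include-dependencies'] if major and major < 2 else []
print(ver, flags)  # (2, 0, 14) []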
- -DOCUMENTATION = ''' ---- -module: homebrew -author: Andrew Dunham and Daniel Jaouen -short_description: Package manager for Homebrew -description: - - Manages Homebrew packages -version_added: "1.1" -options: - name: - description: - - name of package to install/remove - required: true - state: - description: - - state of the package - choices: [ 'head', 'latest', 'present', 'absent', 'linked', 'unlinked' ] - required: false - default: present - update_homebrew: - description: - - update homebrew itself first - required: false - default: "no" - choices: [ "yes", "no" ] - upgrade_all: - description: - - upgrade all homebrew packages - required: false - default: no - choices: [ "yes", "no" ] - install_options: - description: - - options flags to install a package - required: false - default: null - version_added: "1.4" -notes: [] -''' -EXAMPLES = ''' -- homebrew: name=foo state=present -- homebrew: name=foo state=present update_homebrew=yes -- homebrew: name=foo state=latest update_homebrew=yes -- homebrew: update_homebrew=yes upgrade_all=yes -- homebrew: name=foo state=head -- homebrew: name=foo state=linked -- homebrew: name=foo state=absent -- homebrew: name=foo,bar state=absent -- homebrew: name=foo state=present install_options=with-baz,enable-debug -''' - -import os.path -import re - - -# exceptions -------------------------------------------------------------- {{{ -class HomebrewException(Exception): - pass -# /exceptions ------------------------------------------------------------- }}} - - -# utils ------------------------------------------------------------------- {{{ -def _create_regex_group(s): - lines = (line.strip() for line in s.split('\n') if line.strip()) - chars = filter(None, (line.split('#')[0].strip() for line in lines)) - group = r'[^' + r''.join(chars) + r']' - return re.compile(group) -# /utils ------------------------------------------------------------------ }}} - - -class Homebrew(object): - '''A class to manage Homebrew packages.''' - - # class regexes ------------------------------------------------ {{{ - VALID_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - : # colons - {sep} # the OS-specific path separator - . # dots - - # dashes - '''.format(sep=os.path.sep) - - VALID_BREW_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - {sep} # the OS-specific path separator - . # dots - - # dashes - '''.format(sep=os.path.sep) - - VALID_PACKAGE_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - . 
# dots - \+ # plusses - - # dashes - ''' - - INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS) - INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS) - INVALID_PACKAGE_REGEX = _create_regex_group(VALID_PACKAGE_CHARS) - # /class regexes ----------------------------------------------- }}} - - # class validations -------------------------------------------- {{{ - @classmethod - def valid_path(cls, path): - ''' - `path` must be one of: - - list of paths - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - colons - - os.path.sep - ''' - - if isinstance(path, basestring): - return not cls.INVALID_PATH_REGEX.search(path) - - try: - iter(path) - except TypeError: - return False - else: - paths = path - return all(cls.valid_brew_path(path_) for path_ in paths) - - @classmethod - def valid_brew_path(cls, brew_path): - ''' - `brew_path` must be one of: - - None - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - os.path.sep - ''' - - if brew_path is None: - return True - - return ( - isinstance(brew_path, basestring) - and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) - ) - - @classmethod - def valid_package(cls, package): - '''A valid package is either None or alphanumeric.''' - - if package is None: - return True - - return ( - isinstance(package, basestring) - and not cls.INVALID_PACKAGE_REGEX.search(package) - ) - - @classmethod - def valid_state(cls, state): - ''' - A valid state is one of: - - None - - installed - - upgraded - - head - - linked - - unlinked - - absent - ''' - - if state is None: - return True - else: - return ( - isinstance(state, basestring) - and state.lower() in ( - 'installed', - 'upgraded', - 'head', - 'linked', - 'unlinked', - 'absent', - ) - ) - - @classmethod - def valid_module(cls, module): - '''A valid module is an instance of AnsibleModule.''' - - return isinstance(module, AnsibleModule) - - # /class validations ------------------------------------------- }}} - - # class properties --------------------------------------------- {{{ - @property - def module(self): - return self._module - - @module.setter - def module(self, module): - if not self.valid_module(module): - self._module = None - self.failed = True - self.message = 'Invalid module: {0}.'.format(module) - raise HomebrewException(self.message) - - else: - self._module = module - return module - - @property - def path(self): - return self._path - - @path.setter - def path(self, path): - if not self.valid_path(path): - self._path = [] - self.failed = True - self.message = 'Invalid path: {0}.'.format(path) - raise HomebrewException(self.message) - - else: - if isinstance(path, basestring): - self._path = path.split(':') - else: - self._path = path - - return path - - @property - def brew_path(self): - return self._brew_path - - @brew_path.setter - def brew_path(self, brew_path): - if not self.valid_brew_path(brew_path): - self._brew_path = None - self.failed = True - self.message = 'Invalid brew_path: {0}.'.format(brew_path) - raise HomebrewException(self.message) - - else: - self._brew_path = brew_path - return brew_path - - @property - def params(self): - return self._params - - @params.setter - def params(self, params): - self._params = self.module.params - return self._params - - @property - def current_package(self): - return self._current_package - - @current_package.setter - def current_package(self, package): - if not self.valid_package(package): - self._current_package = None - self.failed = True 
- self.message = 'Invalid package: {0}.'.format(package) - raise HomebrewException(self.message) - - else: - self._current_package = package - return package - # /class properties -------------------------------------------- }}} - - def __init__(self, module, path=None, packages=None, state=None, - update_homebrew=False, upgrade_all=False, - install_options=None): - if not install_options: - install_options = list() - self._setup_status_vars() - self._setup_instance_vars(module=module, path=path, packages=packages, - state=state, update_homebrew=update_homebrew, - upgrade_all=upgrade_all, - install_options=install_options, ) - - self._prep() - - # prep --------------------------------------------------------- {{{ - def _setup_status_vars(self): - self.failed = False - self.changed = False - self.changed_count = 0 - self.unchanged_count = 0 - self.message = '' - - def _setup_instance_vars(self, **kwargs): - for key, val in kwargs.iteritems(): - setattr(self, key, val) - - def _prep(self): - self._prep_path() - self._prep_brew_path() - - def _prep_path(self): - if not self.path: - self.path = ['/usr/local/bin'] - - def _prep_brew_path(self): - if not self.module: - self.brew_path = None - self.failed = True - self.message = 'AnsibleModule not set.' - raise HomebrewException(self.message) - - self.brew_path = self.module.get_bin_path( - 'brew', - required=True, - opt_dirs=self.path, - ) - if not self.brew_path: - self.brew_path = None - self.failed = True - self.message = 'Unable to locate homebrew executable.' - raise HomebrewException('Unable to locate homebrew executable.') - - return self.brew_path - - def _status(self): - return (self.failed, self.changed, self.message) - # /prep -------------------------------------------------------- }}} - - def run(self): - try: - self._run() - except HomebrewException: - pass - - if not self.failed and (self.changed_count + self.unchanged_count > 1): - self.message = "Changed: %d, Unchanged: %d" % ( - self.changed_count, - self.unchanged_count, - ) - (failed, changed, message) = self._status() - - return (failed, changed, message) - - # checks ------------------------------------------------------- {{{ - def _current_package_is_installed(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - cmd = [ - "{brew_path}".format(brew_path=self.brew_path), - "info", - self.current_package, - ] - rc, out, err = self.module.run_command(cmd) - for line in out.split('\n'): - if ( - re.search(r'Built from source', line) - or re.search(r'Poured from bottle', line) - ): - return True - - return False - - def _outdated_packages(self): - rc, out, err = self.module.run_command([ - self.brew_path, - 'outdated', - ]) - return [line.split(' ')[0].strip() for line in out.split('\n') if line] - - def _current_package_is_outdated(self): - if not self.valid_package(self.current_package): - return False - - return self.current_package in self._outdated_packages() - - def _current_package_is_installed_from_head(self): - if not Homebrew.valid_package(self.current_package): - return False - elif not self._current_package_is_installed(): - return False - - rc, out, err = self.module.run_command([ - self.brew_path, - 'info', - self.current_package, - ]) - - try: - version_info = [line for line in out.split('\n') if line][0] - except IndexError: - return False - - return version_info.split(' ')[-1] == 'HEAD' - # /checks 
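The installed check above treats a formula as installed when `brew info` reports how it was installed. A standalone sketch of that heuristic, with assumed sample outputs:

import re

def looks_installed(brew_info_output):
    # Installed formulae report how they were installed; missing ones do not.
    for line in brew_info_output.split('\n'):
        if re.search(r'Built from source', line) or re.search(r'Poured from bottle', line):
            return True
    return False

print(looks_installed('git: stable 2.1.3\nPoured from bottle\n'))  # True
print(looks_installed('git: stable 2.1.3\nNot installed\n'))       # False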
------------------------------------------------------ }}} - - # commands ----------------------------------------------------- {{{ - def _run(self): - if self.update_homebrew: - self._update_homebrew() - - if self.upgrade_all: - self._upgrade_all() - - if self.packages: - if self.state == 'installed': - return self._install_packages() - elif self.state == 'upgraded': - return self._upgrade_packages() - elif self.state == 'head': - return self._install_packages() - elif self.state == 'linked': - return self._link_packages() - elif self.state == 'unlinked': - return self._unlink_packages() - elif self.state == 'absent': - return self._uninstall_packages() - - # updated -------------------------------- {{{ - def _update_homebrew(self): - rc, out, err = self.module.run_command([ - self.brew_path, - 'update', - ]) - if rc == 0: - if out and isinstance(out, basestring): - already_updated = any( - re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) - for s in out.split('\n') - if s - ) - if not already_updated: - self.changed = True - self.message = 'Homebrew updated successfully.' - else: - self.message = 'Homebrew already up-to-date.' - - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - # /updated ------------------------------- }}} - - # _upgrade_all --------------------------- {{{ - def _upgrade_all(self): - rc, out, err = self.module.run_command([ - self.brew_path, - 'upgrade', - ]) - if rc == 0: - if not out: - self.message = 'Homebrew packages already upgraded.' - - else: - self.changed = True - self.message = 'Homebrew upgraded.' - - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - # /_upgrade_all -------------------------- }}} - - # installed ------------------------------ {{{ - def _install_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if self._current_package_is_installed(): - self.unchanged_count += 1 - self.message = 'Package already installed: {0}'.format( - self.current_package, - ) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be installed: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - if self.state == 'head': - head = '--HEAD' - else: - head = None - - opts = ( - [self.brew_path, 'install'] - + self.install_options - + [self.current_package, head] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if self._current_package_is_installed(): - self.changed_count += 1 - self.changed = True - self.message = 'Package installed: {0}'.format(self.current_package) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - - def _install_packages(self): - for package in self.packages: - self.current_package = package - self._install_current_package() - - return True - # /installed ----------------------------- }}} - - # upgraded ------------------------------- {{{ - def _upgrade_current_package(self): - command = 'upgrade' - - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - command = 'install' - - if self._current_package_is_installed() 
and not self._current_package_is_outdated(): - self.message = 'Package is already upgraded: {0}'.format( - self.current_package, - ) - self.unchanged_count += 1 - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be upgraded: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, command] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if self._current_package_is_installed() and not self._current_package_is_outdated(): - self.changed_count += 1 - self.changed = True - self.message = 'Package upgraded: {0}'.format(self.current_package) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - - def _upgrade_all_packages(self): - opts = ( - [self.brew_path, 'upgrade'] - + self.install_options - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if rc == 0: - self.changed = True - self.message = 'All packages upgraded.' - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - - def _upgrade_packages(self): - if not self.packages: - self._upgrade_all_packages() - else: - for package in self.packages: - self.current_package = package - self._upgrade_current_package() - return True - # /upgraded ------------------------------ }}} - - # uninstalled ---------------------------- {{{ - def _uninstall_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - self.unchanged_count += 1 - self.message = 'Package already uninstalled: {0}'.format( - self.current_package, - ) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be uninstalled: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, 'uninstall'] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if not self._current_package_is_installed(): - self.changed_count += 1 - self.changed = True - self.message = 'Package uninstalled: {0}'.format(self.current_package) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - - def _uninstall_packages(self): - for package in self.packages: - self.current_package = package - self._uninstall_current_package() - - return True - # /uninstalled ----------------------------- }}} - - # linked --------------------------------- {{{ - def _link_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - self.failed = True - self.message = 'Package not installed: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be linked: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, 'link'] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if 
opt] - rc, out, err = self.module.run_command(cmd) - - if rc == 0: - self.changed_count += 1 - self.changed = True - self.message = 'Package linked: {0}'.format(self.current_package) - - return True - else: - self.failed = True - self.message = 'Package could not be linked: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - def _link_packages(self): - for package in self.packages: - self.current_package = package - self._link_current_package() - - return True - # /linked -------------------------------- }}} - - # unlinked ------------------------------- {{{ - def _unlink_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - self.failed = True - self.message = 'Package not installed: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be unlinked: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, 'unlink'] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if rc == 0: - self.changed_count += 1 - self.changed = True - self.message = 'Package unlinked: {0}'.format(self.current_package) - - return True - else: - self.failed = True - self.message = 'Package could not be unlinked: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - def _unlink_packages(self): - for package in self.packages: - self.current_package = package - self._unlink_current_package() - - return True - # /unlinked ------------------------------ }}} - # /commands ---------------------------------------------------- }}} - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(aliases=["pkg"], required=False), - path=dict(required=False), - state=dict( - default="present", - choices=[ - "present", "installed", - "latest", "upgraded", "head", - "linked", "unlinked", - "absent", "removed", "uninstalled", - ], - ), - update_homebrew=dict( - default="no", - aliases=["update-brew"], - type='bool', - ), - upgrade_all=dict( - default="no", - aliases=["upgrade"], - type='bool', - ), - install_options=dict( - default=None, - aliases=['options'], - type='list', - ) - ), - supports_check_mode=True, - ) - p = module.params - - if p['name']: - packages = p['name'].split(',') - else: - packages = None - - path = p['path'] - if path: - path = path.split(':') - else: - path = ['/usr/local/bin'] - - state = p['state'] - if state in ('present', 'installed'): - state = 'installed' - if state in ('head', ): - state = 'head' - if state in ('latest', 'upgraded'): - state = 'upgraded' - if state == 'linked': - state = 'linked' - if state == 'unlinked': - state = 'unlinked' - if state in ('absent', 'removed', 'uninstalled'): - state = 'absent' - - update_homebrew = p['update_homebrew'] - upgrade_all = p['upgrade_all'] - p['install_options'] = p['install_options'] or [] - install_options = ['--{0}'.format(install_option) - for install_option in p['install_options']] - - brew = Homebrew(module=module, path=path, packages=packages, - state=state, update_homebrew=update_homebrew, - upgrade_all=upgrade_all, install_options=install_options) - (failed, changed, message) = brew.run() - if failed: - module.fail_json(msg=message) - 
else: - module.exit_json(changed=changed, msg=message) - -# this is magic, see lib/ansible/module_common.py -#<> -main() diff --git a/library/packaging/homebrew_cask b/library/packaging/homebrew_cask deleted file mode 100644 index dede8d4bb36..00000000000 --- a/library/packaging/homebrew_cask +++ /dev/null @@ -1,513 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Daniel Jaouen -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -DOCUMENTATION = ''' ---- -module: homebrew_cask -author: Daniel Jaouen -short_description: Install/uninstall homebrew casks. -description: - - Manages Homebrew casks. -version_added: "1.6" -options: - name: - description: - - name of cask to install/remove - required: true - state: - description: - - state of the cask - choices: [ 'installed', 'uninstalled' ] - required: false - default: present -''' -EXAMPLES = ''' -- homebrew_cask: name=alfred state=present -- homebrew_cask: name=alfred state=absent -''' - -import os.path -import re - - -# exceptions -------------------------------------------------------------- {{{ -class HomebrewCaskException(Exception): - pass -# /exceptions ------------------------------------------------------------- }}} - - -# utils ------------------------------------------------------------------- {{{ -def _create_regex_group(s): - lines = (line.strip() for line in s.split('\n') if line.strip()) - chars = filter(None, (line.split('#')[0].strip() for line in lines)) - group = r'[^' + r''.join(chars) + r']' - return re.compile(group) -# /utils ------------------------------------------------------------------ }}} - - -class HomebrewCask(object): - '''A class to manage Homebrew casks.''' - - # class regexes ------------------------------------------------ {{{ - VALID_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - : # colons - {sep} # the OS-specific path separator - - # dashes - '''.format(sep=os.path.sep) - - VALID_BREW_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - {sep} # the OS-specific path separator - - # dashes - '''.format(sep=os.path.sep) - - VALID_CASK_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - - # dashes - ''' - - INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS) - INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS) - INVALID_CASK_REGEX = _create_regex_group(VALID_CASK_CHARS) - # /class regexes ----------------------------------------------- }}} - - # class validations -------------------------------------------- {{{ - @classmethod - def valid_path(cls, path): - ''' - `path` must be one of: - - list of paths - - a string containing only: - - alphanumeric characters - - dashes - - spaces - - colons - - os.path.sep - ''' - - if isinstance(path, basestring): - return not cls.INVALID_PATH_REGEX.search(path) - - try: - iter(path) - except TypeError: - return False - else: - paths = path - return all(cls.valid_brew_path(path_) for 
path_ in paths) - - @classmethod - def valid_brew_path(cls, brew_path): - ''' - `brew_path` must be one of: - - None - - a string containing only: - - alphanumeric characters - - dashes - - spaces - - os.path.sep - ''' - - if brew_path is None: - return True - - return ( - isinstance(brew_path, basestring) - and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) - ) - - @classmethod - def valid_cask(cls, cask): - '''A valid cask is either None or alphanumeric + backslashes.''' - - if cask is None: - return True - - return ( - isinstance(cask, basestring) - and not cls.INVALID_CASK_REGEX.search(cask) - ) - - @classmethod - def valid_state(cls, state): - ''' - A valid state is one of: - - installed - - absent - ''' - - if state is None: - return True - else: - return ( - isinstance(state, basestring) - and state.lower() in ( - 'installed', - 'absent', - ) - ) - - @classmethod - def valid_module(cls, module): - '''A valid module is an instance of AnsibleModule.''' - - return isinstance(module, AnsibleModule) - # /class validations ------------------------------------------- }}} - - # class properties --------------------------------------------- {{{ - @property - def module(self): - return self._module - - @module.setter - def module(self, module): - if not self.valid_module(module): - self._module = None - self.failed = True - self.message = 'Invalid module: {0}.'.format(module) - raise HomebrewCaskException(self.message) - - else: - self._module = module - return module - - @property - def path(self): - return self._path - - @path.setter - def path(self, path): - if not self.valid_path(path): - self._path = [] - self.failed = True - self.message = 'Invalid path: {0}.'.format(path) - raise HomebrewCaskException(self.message) - - else: - if isinstance(path, basestring): - self._path = path.split(':') - else: - self._path = path - - return path - - @property - def brew_path(self): - return self._brew_path - - @brew_path.setter - def brew_path(self, brew_path): - if not self.valid_brew_path(brew_path): - self._brew_path = None - self.failed = True - self.message = 'Invalid brew_path: {0}.'.format(brew_path) - raise HomebrewCaskException(self.message) - - else: - self._brew_path = brew_path - return brew_path - - @property - def params(self): - return self._params - - @params.setter - def params(self, params): - self._params = self.module.params - return self._params - - @property - def current_cask(self): - return self._current_cask - - @current_cask.setter - def current_cask(self, cask): - if not self.valid_cask(cask): - self._current_cask = None - self.failed = True - self.message = 'Invalid cask: {0}.'.format(cask) - raise HomebrewCaskException(self.message) - - else: - self._current_cask = cask - return cask - # /class properties -------------------------------------------- }}} - - def __init__(self, module, path=None, casks=None, state=None): - self._setup_status_vars() - self._setup_instance_vars(module=module, path=path, casks=casks, - state=state) - - self._prep() - - # prep --------------------------------------------------------- {{{ - def _setup_status_vars(self): - self.failed = False - self.changed = False - self.changed_count = 0 - self.unchanged_count = 0 - self.message = '' - - def _setup_instance_vars(self, **kwargs): - for key, val in kwargs.iteritems(): - setattr(self, key, val) - - def _prep(self): - self._prep_path() - self._prep_brew_path() - - def _prep_path(self): - if not self.path: - self.path = ['/usr/local/bin'] - - def _prep_brew_path(self): - if not self.module: - 
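Both Homebrew classes accept path either as a colon-separated string or as a list, as in the setter above. A minimal sketch of that normalisation; str stands in for the basestring check of the Python 2 original, and the default directory mirrors the module's fallback:

def normalise_path(path):
    if path is None:
        return ['/usr/local/bin']   # module default
    if isinstance(path, str):       # basestring in the Python 2 original
        return path.split(':')
    return list(path)

print(normalise_path('/usr/local/bin:/opt/homebrew/bin'))
# ['/usr/local/bin', '/opt/homebrew/bin']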
self.brew_path = None - self.failed = True - self.message = 'AnsibleModule not set.' - raise HomebrewCaskException(self.message) - - self.brew_path = self.module.get_bin_path( - 'brew', - required=True, - opt_dirs=self.path, - ) - if not self.brew_path: - self.brew_path = None - self.failed = True - self.message = 'Unable to locate homebrew executable.' - raise HomebrewCaskException('Unable to locate homebrew executable.') - - return self.brew_path - - def _status(self): - return (self.failed, self.changed, self.message) - # /prep -------------------------------------------------------- }}} - - def run(self): - try: - self._run() - except HomebrewCaskException: - pass - - if not self.failed and (self.changed_count + self.unchanged_count > 1): - self.message = "Changed: %d, Unchanged: %d" % ( - self.changed_count, - self.unchanged_count, - ) - (failed, changed, message) = self._status() - - return (failed, changed, message) - - # checks ------------------------------------------------------- {{{ - def _current_cask_is_installed(self): - if not self.valid_cask(self.current_cask): - self.failed = True - self.message = 'Invalid cask: {0}.'.format(self.current_cask) - raise HomebrewCaskException(self.message) - - cmd = [self.brew_path, 'cask', 'list'] - rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0]) - - if 'nothing to list' in err: - return False - elif rc == 0: - casks = [cask_.strip() for cask_ in out.split('\n') if cask_.strip()] - return self.current_cask in casks - else: - self.failed = True - self.message = err.strip() - raise HomebrewCaskException(self.message) - # /checks ------------------------------------------------------ }}} - - # commands ----------------------------------------------------- {{{ - def _run(self): - if self.state == 'installed': - return self._install_casks() - elif self.state == 'absent': - return self._uninstall_casks() - - if self.command: - return self._command() - - # updated -------------------------------- {{{ - def _update_homebrew(self): - rc, out, err = self.module.run_command([ - self.brew_path, - 'update', - ], path_prefix=self.path[0]) - if rc == 0: - if out and isinstance(out, basestring): - already_updated = any( - re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) - for s in out.split('\n') - if s - ) - if not already_updated: - self.changed = True - self.message = 'Homebrew updated successfully.' - else: - self.message = 'Homebrew already up-to-date.' 
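_update_homebrew() above decides whether anything changed by scanning the `brew update` output for an 'Already up-to-date.' line. A sketch of that check, with assumed sample outputs:

import re

def update_changed(out):
    # True when `brew update` actually pulled something new.
    return not any(
        re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
        for s in out.split('\n') if s
    )

print(update_changed('Already up-to-date.\n'))                   # False
print(update_changed('Updated Homebrew from 0abcd to 1ef23.\n')) # True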
- - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewCaskException(self.message) - # /updated ------------------------------- }}} - - # installed ------------------------------ {{{ - def _install_current_cask(self): - if not self.valid_cask(self.current_cask): - self.failed = True - self.message = 'Invalid cask: {0}.'.format(self.current_cask) - raise HomebrewCaskException(self.message) - - if self._current_cask_is_installed(): - self.unchanged_count += 1 - self.message = 'Cask already installed: {0}'.format( - self.current_cask, - ) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Cask would be installed: {0}'.format( - self.current_cask - ) - raise HomebrewCaskException(self.message) - - cmd = [opt - for opt in (self.brew_path, 'cask', 'install', self.current_cask) - if opt] - - rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0]) - - if self._current_cask_is_installed(): - self.changed_count += 1 - self.changed = True - self.message = 'Cask installed: {0}'.format(self.current_cask) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewCaskException(self.message) - - def _install_casks(self): - for cask in self.casks: - self.current_cask = cask - self._install_current_cask() - - return True - # /installed ----------------------------- }}} - - # uninstalled ---------------------------- {{{ - def _uninstall_current_cask(self): - if not self.valid_cask(self.current_cask): - self.failed = True - self.message = 'Invalid cask: {0}.'.format(self.current_cask) - raise HomebrewCaskException(self.message) - - if not self._current_cask_is_installed(): - self.unchanged_count += 1 - self.message = 'Cask already uninstalled: {0}'.format( - self.current_cask, - ) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Cask would be uninstalled: {0}'.format( - self.current_cask - ) - raise HomebrewCaskException(self.message) - - cmd = [opt - for opt in (self.brew_path, 'cask', 'uninstall', self.current_cask) - if opt] - - rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0]) - - if not self._current_cask_is_installed(): - self.changed_count += 1 - self.changed = True - self.message = 'Cask uninstalled: {0}'.format(self.current_cask) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewCaskException(self.message) - - def _uninstall_casks(self): - for cask in self.casks: - self.current_cask = cask - self._uninstall_current_cask() - - return True - # /uninstalled ----------------------------- }}} - # /commands ---------------------------------------------------- }}} - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(aliases=["cask"], required=False), - path=dict(required=False), - state=dict( - default="present", - choices=[ - "present", "installed", - "absent", "removed", "uninstalled", - ], - ), - ), - supports_check_mode=True, - ) - p = module.params - - if p['name']: - casks = p['name'].split(',') - else: - casks = None - - path = p['path'] - if path: - path = path.split(':') - else: - path = ['/usr/local/bin'] - - state = p['state'] - if state in ('present', 'installed'): - state = 'installed' - if state in ('absent', 'removed', 'uninstalled'): - state = 'absent' - - brew_cask = HomebrewCask(module=module, path=path, casks=casks, - state=state) - (failed, changed, message) = brew_cask.run() - if failed: - module.fail_json(msg=message) - else: - 
module.exit_json(changed=changed, msg=message) - -# this is magic, see lib/ansible/module_common.py -#<> -main() diff --git a/library/packaging/homebrew_tap b/library/packaging/homebrew_tap deleted file mode 100644 index a79ba076a8a..00000000000 --- a/library/packaging/homebrew_tap +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Daniel Jaouen -# Based on homebrew (Andrew Dunham ) -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import re - -DOCUMENTATION = ''' ---- -module: homebrew_tap -author: Daniel Jaouen -short_description: Tap a Homebrew repository. -description: - - Tap external Homebrew repositories. -version_added: "1.6" -options: - tap: - description: - - The repository to tap. - required: true - state: - description: - - state of the repository. - choices: [ 'present', 'absent' ] - required: false - default: 'present' -requirements: [ homebrew ] -''' - -EXAMPLES = ''' -homebrew_tap: tap=homebrew/dupes state=present -homebrew_tap: tap=homebrew/dupes state=absent -homebrew_tap: tap=homebrew/dupes,homebrew/science state=present -''' - - -def a_valid_tap(tap): - '''Returns True if the tap is valid.''' - regex = re.compile(r'^(\S+)/(homebrew-)?(\w+)$') - return regex.match(tap) - - -def already_tapped(module, brew_path, tap): - '''Returns True if already tapped.''' - - rc, out, err = module.run_command([ - brew_path, - 'tap', - ]) - taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_] - return tap.lower() in taps - - -def add_tap(module, brew_path, tap): - '''Adds a single tap.''' - failed, changed, msg = False, False, '' - - if not a_valid_tap(tap): - failed = True - msg = 'not a valid tap: %s' % tap - - elif not already_tapped(module, brew_path, tap): - if module.check_mode: - module.exit_json(changed=True) - - rc, out, err = module.run_command([ - brew_path, - 'tap', - tap, - ]) - if already_tapped(module, brew_path, tap): - changed = True - msg = 'successfully tapped: %s' % tap - else: - failed = True - msg = 'failed to tap: %s' % tap - - else: - msg = 'already tapped: %s' % tap - - return (failed, changed, msg) - - -def add_taps(module, brew_path, taps): - '''Adds one or more taps.''' - failed, unchanged, added, msg = False, 0, 0, '' - - for tap in taps: - (failed, changed, msg) = add_tap(module, brew_path, tap) - if failed: - break - if changed: - added += 1 - else: - unchanged += 1 - - if failed: - msg = 'added: %d, unchanged: %d, error: ' + msg - msg = msg % (added, unchanged) - elif added: - changed = True - msg = 'added: %d, unchanged: %d' % (added, unchanged) - else: - msg = 'added: %d, unchanged: %d' % (added, unchanged) - - return (failed, changed, msg) - - -def remove_tap(module, brew_path, tap): - '''Removes a single tap.''' - failed, changed, msg = False, False, '' - - if not a_valid_tap(tap): - failed = True - msg = 'not a valid tap: %s' % tap - - elif already_tapped(module, brew_path, tap): - if module.check_mode: - 
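Tap names are validated by a_valid_tap() above against the user/repo form, optionally with a homebrew- prefix on the repository name. A quick standalone sketch of the same regex:

import re

def a_valid_tap(tap):
    # user/repo, with an optional 'homebrew-' prefix on the repository name.
    return bool(re.match(r'^(\S+)/(homebrew-)?(\w+)$', tap))

print(a_valid_tap('homebrew/dupes'))         # True
print(a_valid_tap('someuser/homebrew-foo'))  # True
print(a_valid_tap('not a tap'))              # False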
module.exit_json(changed=True) - - rc, out, err = module.run_command([ - brew_path, - 'untap', - tap, - ]) - if not already_tapped(module, brew_path, tap): - changed = True - msg = 'successfully untapped: %s' % tap - else: - failed = True - msg = 'failed to untap: %s' % tap - - else: - msg = 'already untapped: %s' % tap - - return (failed, changed, msg) - - -def remove_taps(module, brew_path, taps): - '''Removes one or more taps.''' - failed, unchanged, removed, msg = False, 0, 0, '' - - for tap in taps: - (failed, changed, msg) = remove_tap(module, brew_path, tap) - if failed: - break - if changed: - removed += 1 - else: - unchanged += 1 - - if failed: - msg = 'removed: %d, unchanged: %d, error: ' + msg - msg = msg % (removed, unchanged) - elif removed: - changed = True - msg = 'removed: %d, unchanged: %d' % (removed, unchanged) - else: - msg = 'removed: %d, unchanged: %d' % (removed, unchanged) - - return (failed, changed, msg) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(aliases=['tap'], required=True), - state=dict(default='present', choices=['present', 'absent']), - ), - supports_check_mode=True, - ) - - brew_path = module.get_bin_path( - 'brew', - required=True, - opt_dirs=['/usr/local/bin'] - ) - - taps = module.params['name'].split(',') - - if module.params['state'] == 'present': - failed, changed, msg = add_taps(module, brew_path, taps) - - if failed: - module.fail_json(msg=msg) - else: - module.exit_json(changed=changed, msg=msg) - - elif module.params['state'] == 'absent': - failed, changed, msg = remove_taps(module, brew_path, taps) - - if failed: - module.fail_json(msg=msg) - else: - module.exit_json(changed=changed, msg=msg) - -# this is magic, see lib/ansible/module_common.py -#<<INCLUDE_ANSIBLE_MODULE_COMMON>> -main() diff --git a/library/packaging/layman b/library/packaging/layman deleted file mode 100644 index 57c03528c9e..00000000000 --- a/library/packaging/layman +++ /dev/null @@ -1,236 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Jakub Jirutka -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -import shutil -from os import path -from urllib2 import Request, urlopen, URLError - -DOCUMENTATION = ''' ---- -module: layman -author: Jakub Jirutka -version_added: "1.6" -short_description: Manage Gentoo overlays -description: - - Uses Layman to manage additional repositories for the Portage package manager on Gentoo Linux. Please note that Layman must be installed on a managed node prior to using this module. -options: - name: - description: - - The overlay id to install, synchronize, or uninstall. Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)). - required: true - list_url: - description: - - A URL of an alternative overlays list that defines the overlay to install. This list will be fetched and saved under C(${overlay_defs}/${name}.xml), where - C(overlay_defs) is read from Layman's configuration.
- required: false - state: - description: - - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay. - required: false - default: present - choices: [present, absent, updated] -''' - -EXAMPLES = ''' -# Install the overlay 'mozilla' which is on the central overlays list. -- layman: name=mozilla - -# Install the overlay 'cvut' from the specified alternative list. -- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml - -# Update (sync) the overlay 'cvut', or install if not installed yet. -- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml state=updated - -# Update (sync) all of the installed overlays. -- layman: name=ALL state=updated - -# Uninstall the overlay 'cvut'. -- layman: name=cvut state=absent -''' - -USERAGENT = 'ansible-httpget' - -try: - from layman.api import LaymanAPI - from layman.config import BareConfig - HAS_LAYMAN_API = True -except ImportError: - HAS_LAYMAN_API = False - - -class ModuleError(Exception): pass - - -def init_layman(config=None): - '''Returns the initialized ``LaymanAPI``. - - :param config: the layman's configuration to use (optional) - ''' - if config is None: config = BareConfig(read_configfile=True, quietness=1) - return LaymanAPI(config) - - -def download_url(url, dest): - ''' - :param url: the URL to download - :param dest: the absolute path of where to save the downloaded content to; - it must be writable and not a directory - - :raises ModuleError - ''' - request = Request(url) - request.add_header('User-agent', USERAGENT) - - try: - response = urlopen(request) - except URLError, e: - raise ModuleError("Failed to get %s: %s" % (url, str(e))) - - try: - with open(dest, 'w') as f: - shutil.copyfileobj(response, f) - except IOError, e: - raise ModuleError("Failed to write: %s" % str(e)) - - -def install_overlay(name, list_url=None): - '''Installs the overlay repository. If not on the central overlays list, - then :list_url of an alternative list must be provided. The list will be - fetched and saved under ``%(overlay_defs)/%(name.xml)`` (location of the - ``overlay_defs`` is read from Layman's configuration). - - :param name: the overlay id - :param list_url: the URL of the remote repositories list to look for the overlay - definition (optional, default: None) - - :returns: True if the overlay was installed, or False if it already exists - (i.e. nothing has changed) - :raises ModuleError - ''' - # read Layman configuration - layman_conf = BareConfig(read_configfile=True) - layman = init_layman(layman_conf) - - if layman.is_installed(name): - return False - - if not layman.is_repo(name): - if not list_url: raise ModuleError("Overlay '%s' is not on the list of known " \ - "overlays and the URL of the remote list was not provided." % name) - - overlay_defs = layman_conf.get_option('overlay_defs') - dest = path.join(overlay_defs, name + '.xml') - - download_url(list_url, dest) - - # reload config - layman = init_layman() - - if not layman.add_repos(name): raise ModuleError(layman.get_errors()) - - return True - - -def uninstall_overlay(name): - '''Uninstalls the given overlay repository from the system. - - :param name: the overlay id to uninstall - - :returns: True if the overlay was uninstalled, or False if it doesn't exist - (i.e.
nothing has changed) - :raises ModuleError - ''' - layman = init_layman() - - if not layman.is_installed(name): - return False - - layman.delete_repos(name) - if layman.get_errors(): raise ModuleError(layman.get_errors()) - - return True - - -def sync_overlay(name): - '''Synchronizes the specified overlay repository. - - :param name: the overlay repository id to sync - :raises ModuleError - ''' - layman = init_layman() - - if not layman.sync(name): - messages = [ str(item[1]) for item in layman.sync_results[2] ] - raise ModuleError(messages) - - -def sync_overlays(): - '''Synchronizes all of the installed overlays. - - :raises ModuleError - ''' - layman = init_layman() - - for name in layman.get_installed(): - sync_overlay(name) - - -def main(): - # define module - module = AnsibleModule( - argument_spec = { - 'name': { 'required': True }, - 'list_url': { 'aliases': ['url'] }, - 'state': { 'default': "present", 'choices': ['present', 'absent', 'updated'] }, - } - ) - - if not HAS_LAYMAN_API: - module.fail_json(msg='Layman is not installed') - - state, name, url = (module.params[key] for key in ['state', 'name', 'list_url']) - - changed = False - try: - if state == 'present': - changed = install_overlay(name, url) - - elif state == 'updated': - if name == 'ALL': - sync_overlays() - elif install_overlay(name, url): - changed = True - else: - sync_overlay(name) - else: - changed = uninstall_overlay(name) - - except ModuleError, e: - module.fail_json(msg=e.message) - else: - module.exit_json(changed=changed, name=name) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/macports b/library/packaging/macports deleted file mode 100644 index ae7010b1cbd..00000000000 --- a/library/packaging/macports +++ /dev/null @@ -1,217 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Jimmy Tang -# Based on opkg (Patrick Pelletier), pacman -# (Afterburn) and pkgin (Shaun Zinck) modules -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' ---- -module: macports -author: Jimmy Tang -short_description: Package manager for MacPorts -description: - - Manages MacPorts packages -version_added: "1.1" -options: - name: - description: - - name of package to install/remove - required: true - state: - description: - - state of the package - choices: [ 'present', 'absent', 'active', 'inactive' ] - required: false - default: present - update_cache: - description: - - update the package db first - required: false - default: "no" - choices: [ "yes", "no" ] -notes: [] -''' -EXAMPLES = ''' -- macports: name=foo state=present -- macports: name=foo state=present update_cache=yes -- macports: name=foo state=absent -- macports: name=foo state=active -- macports: name=foo state=inactive -''' - -import pipes - -def update_package_db(module, port_path): - """ Updates packages list.
""" - - rc, out, err = module.run_command("%s sync" % port_path) - - if rc != 0: - module.fail_json(msg="could not update package db") - - -def query_package(module, port_path, name, state="present"): - """ Returns whether a package is installed or not. """ - - if state == "present": - - rc, out, err = module.run_command("%s installed | grep -q ^.*%s" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True) - if rc == 0: - return True - - return False - - elif state == "active": - - rc, out, err = module.run_command("%s installed %s | grep -q active" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True) - - if rc == 0: - return True - - return False - - -def remove_packages(module, port_path, packages): - """ Uninstalls one or more packages if installed. """ - - remove_c = 0 - # Using a for loop incase of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, port_path, package): - continue - - rc, out, err = module.run_command("%s uninstall %s" % (port_path, package)) - - if query_package(module, port_path, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out)) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, port_path, packages): - """ Installs one or more packages if not already installed. """ - - install_c = 0 - - for package in packages: - if query_package(module, port_path, package): - continue - - rc, out, err = module.run_command("%s install %s" % (port_path, package)) - - if not query_package(module, port_path, package): - module.fail_json(msg="failed to install %s: %s" % (package, out)) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) - - module.exit_json(changed=False, msg="package(s) already present") - - -def activate_packages(module, port_path, packages): - """ Activate a package if it's inactive. """ - - activate_c = 0 - - for package in packages: - if not query_package(module, port_path, package): - module.fail_json(msg="failed to activate %s, package(s) not present" % (package)) - - if query_package(module, port_path, package, state="active"): - continue - - rc, out, err = module.run_command("%s activate %s" % (port_path, package)) - - if not query_package(module, port_path, package, state="active"): - module.fail_json(msg="failed to activate %s: %s" % (package, out)) - - activate_c += 1 - - if activate_c > 0: - module.exit_json(changed=True, msg="activated %s package(s)" % (activate_c)) - - module.exit_json(changed=False, msg="package(s) already active") - - -def deactivate_packages(module, port_path, packages): - """ Deactivate a package if it's active. 
""" - - deactivated_c = 0 - - for package in packages: - if not query_package(module, port_path, package): - module.fail_json(msg="failed to activate %s, package(s) not present" % (package)) - - if not query_package(module, port_path, package, state="active"): - continue - - rc, out, err = module.run_command("%s deactivate %s" % (port_path, package)) - - if query_package(module, port_path, package, state="active"): - module.fail_json(msg="failed to deactivated %s: %s" % (package, out)) - - deactivated_c += 1 - - if deactivated_c > 0: - module.exit_json(changed=True, msg="deactivated %s package(s)" % (deactivated_c)) - - module.exit_json(changed=False, msg="package(s) already inactive") - - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(aliases=["pkg"], required=True), - state = dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]), - update_cache = dict(default="no", aliases=["update-cache"], type='bool') - ) - ) - - port_path = module.get_bin_path('port', True, ['/opt/local/bin']) - - p = module.params - - if p["update_cache"]: - update_package_db(module, port_path) - - pkgs = p["name"].split(",") - - if p["state"] in ["present", "installed"]: - install_packages(module, port_path, pkgs) - - elif p["state"] in ["absent", "removed"]: - remove_packages(module, port_path, pkgs) - - elif p["state"] == "active": - activate_packages(module, port_path, pkgs) - - elif p["state"] == "inactive": - deactivate_packages(module, port_path, pkgs) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/npm b/library/packaging/npm deleted file mode 100644 index 1dd2e998492..00000000000 --- a/library/packaging/npm +++ /dev/null @@ -1,263 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Chris Hoffman -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: npm -short_description: Manage node.js packages with npm -description: - - Manage node.js packages with Node Package Manager (npm) -version_added: 1.2 -author: Chris Hoffman -options: - name: - description: - - The name of a node.js library to install - required: false - path: - description: - - The base path where to install the node.js libraries - required: false - version: - description: - - The version to be installed - required: false - global: - description: - - Install the node.js library globally - required: false - default: no - choices: [ "yes", "no" ] - executable: - description: - - The executable location for npm. - - This is useful if you are using a version manager, such as nvm - required: false - ignore_scripts: - description: - - Use the --ignore-scripts flag when installing. 
- required: false - choices: [ "yes", "no" ] - default: no - version_added: "1.8" - production: - description: - - Install dependencies in production mode, excluding devDependencies - required: false - choices: [ "yes", "no" ] - default: no - registry: - description: - - The registry to install modules from. - required: false - version_added: "1.6" - state: - description: - - The state of the node.js library - required: false - default: present - choices: [ "present", "absent", "latest" ] -''' - -EXAMPLES = ''' -description: Install "coffee-script" node.js package. -- npm: name=coffee-script path=/app/location - -description: Install "coffee-script" node.js package at version 1.6.1. -- npm: name=coffee-script version=1.6.1 path=/app/location - -description: Install "coffee-script" node.js package globally. -- npm: name=coffee-script global=yes - -description: Remove the globally installed package "coffee-script". -- npm: name=coffee-script global=yes state=absent - -description: Install "coffee-script" node.js package from custom registry. -- npm: name=coffee-script registry=http://registry.mysite.com - -description: Install packages based on package.json. -- npm: path=/app/location - -description: Update packages based on package.json to their latest version. -- npm: path=/app/location state=latest - -description: Install packages based on package.json using the npm installed with nvm v0.10.1. -- npm: path=/app/location executable=/opt/nvm/v0.10.1/bin/npm state=present -''' - -import os - -try: - import json -except ImportError: - import simplejson as json - -class Npm(object): - def __init__(self, module, **kwargs): - self.module = module - self.glbl = kwargs['glbl'] - self.name = kwargs['name'] - self.version = kwargs['version'] - self.path = kwargs['path'] - self.registry = kwargs['registry'] - self.production = kwargs['production'] - self.ignore_scripts = kwargs['ignore_scripts'] - - if kwargs['executable']: - self.executable = kwargs['executable'].split(' ') - else: - self.executable = [module.get_bin_path('npm', True)] - - if kwargs['version']: - self.name_version = self.name + '@' + self.version - else: - self.name_version = self.name - - def _exec(self, args, run_in_check_mode=False, check_rc=True): - if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = self.executable + args - - if self.glbl: - cmd.append('--global') - if self.production: - cmd.append('--production') - if self.ignore_scripts: - cmd.append('--ignore-scripts') - if self.name: - cmd.append(self.name_version) - if self.registry: - cmd.append('--registry') - cmd.append(self.registry) - - # If path is specified, cd into that path and run the command.
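# For example (hypothetical values): name='coffee-script' with version='1.6.1'
# and path='/app/location' builds cmd = ['npm', 'install', 'coffee-script@1.6.1']
# and runs it with cwd='/app/location', so node_modules lands under that path.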
- cwd = None - if self.path: - if not os.path.exists(self.path): - os.makedirs(self.path) - if not os.path.isdir(self.path): - self.module.fail_json(msg="path %s is not a directory" % self.path) - cwd = self.path - - rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) - return out - return '' - - def list(self): - cmd = ['list', '--json'] - - installed = list() - missing = list() - data = json.loads(self._exec(cmd, True, False)) - if 'dependencies' in data: - for dep in data['dependencies']: - if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']: - missing.append(dep) - elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']: - missing.append(dep) - else: - installed.append(dep) - if self.name and self.name not in installed: - missing.append(self.name) - #Named dependency not installed - else: - missing.append(self.name) - - return installed, missing - - def install(self): - return self._exec(['install']) - - def update(self): - return self._exec(['update']) - - def uninstall(self): - return self._exec(['uninstall']) - - def list_outdated(self): - outdated = list() - data = self._exec(['outdated'], True, False) - for dep in data.splitlines(): - if dep: - # node.js v0.10.22 changed the `npm outdated` module separator - # from "@" to " ". Split on both for backwards compatibility. - pkg, other = re.split('\s|@', dep, 1) - outdated.append(pkg) - - return outdated - - -def main(): - arg_spec = dict( - name=dict(default=None), - path=dict(default=None), - version=dict(default=None), - production=dict(default='no', type='bool'), - executable=dict(default=None), - registry=dict(default=None), - state=dict(default='present', choices=['present', 'absent', 'latest']), - ignore_scripts=dict(default=False, type='bool'), - ) - arg_spec['global'] = dict(default='no', type='bool') - module = AnsibleModule( - argument_spec=arg_spec, - supports_check_mode=True - ) - - name = module.params['name'] - path = module.params['path'] - version = module.params['version'] - glbl = module.params['global'] - production = module.params['production'] - executable = module.params['executable'] - registry = module.params['registry'] - state = module.params['state'] - ignore_scripts = module.params['ignore_scripts'] - - if not path and not glbl: - module.fail_json(msg='path must be specified when not using global') - if state == 'absent' and not name: - module.fail_json(msg='uninstalling a package is only available for named packages') - - npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, \ - executable=executable, registry=registry, ignore_scripts=ignore_scripts) - - changed = False - if state == 'present': - installed, missing = npm.list() - if len(missing): - changed = True - npm.install() - elif state == 'latest': - installed, missing = npm.list() - outdated = npm.list_outdated() - if len(missing) or len(outdated): - changed = True - npm.install() - else: #absent - installed, missing = npm.list() - if name in installed: - changed = True - npm.uninstall() - - module.exit_json(changed=changed) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/openbsd_pkg b/library/packaging/openbsd_pkg deleted file mode 100644 index 790fa89fac9..00000000000 --- a/library/packaging/openbsd_pkg +++ /dev/null @@ -1,373 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Patrik Lundin -# -# This file is part of Ansible -# -# Ansible is free software: 
you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import re -import shlex -import syslog - -DOCUMENTATION = ''' ---- -module: openbsd_pkg -author: Patrik Lundin -version_added: "1.1" -short_description: Manage packages on OpenBSD. -description: - - Manage packages on OpenBSD using the pkg tools. -options: - name: - required: true - description: - - Name of the package. - state: - required: true - choices: [ present, latest, absent ] - description: - - C(present) will make sure the package is installed. - C(latest) will make sure the latest version of the package is installed. - C(absent) will make sure the specified package is not installed. -''' - -EXAMPLES = ''' -# Make sure nmap is installed -- openbsd_pkg: name=nmap state=present - -# Make sure nmap is the latest version -- openbsd_pkg: name=nmap state=latest - -# Make sure nmap is not installed -- openbsd_pkg: name=nmap state=absent - -# Specify a pkg flavour with '--' -- openbsd_pkg: name=vim--nox11 state=present - -# Specify the default flavour to avoid ambiguity errors -- openbsd_pkg: name=vim-- state=present -''' - -# Controls whether we write debug information to syslog. -debug = False - -# Function used for executing commands. -def execute_command(cmd, module): - if debug: - syslog.syslog("execute_command(): cmd = %s" % cmd) - # Break command line into arguments. - # This makes run_command() use shell=False, which we need so that special - # characters like '*' are not shell-expanded. - cmd_args = shlex.split(cmd) - return module.run_command(cmd_args) - -# Function used for getting the name of a currently installed package. -def get_current_name(name, pkg_spec, module): - info_cmd = 'pkg_info' - (rc, stdout, stderr) = execute_command("%s" % (info_cmd), module) - if rc != 0: - return (rc, stdout, stderr) - - if pkg_spec['version']: - pattern = "^%s" % name - elif pkg_spec['flavor']: - pattern = "^%s-.*-%s\s" % (pkg_spec['stem'], pkg_spec['flavor']) - else: - pattern = "^%s-" % pkg_spec['stem'] - - if debug: - syslog.syslog("get_current_name(): pattern = %s" % pattern) - - for line in stdout.splitlines(): - if debug: - syslog.syslog("get_current_name: line = %s" % line) - match = re.search(pattern, line) - if match: - current_name = line.split()[0] - - return current_name - -# Function used to find out if a package is currently installed. -def get_package_state(name, pkg_spec, module): - info_cmd = 'pkg_info -e' - - if pkg_spec['version']: - command = "%s %s" % (info_cmd, name) - elif pkg_spec['flavor']: - command = "%s %s-*-%s" % (info_cmd, pkg_spec['stem'], pkg_spec['flavor']) - else: - command = "%s %s-*" % (info_cmd, pkg_spec['stem']) - - rc, stdout, stderr = execute_command(command, module) - - if (stderr): - module.fail_json(msg="failed in get_package_state(): " + stderr) - - if rc == 0: - return True - else: - return False - -# Function used to make sure a package is present.
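# (Sketch of the shared contract used below: each package_* helper returns a
# (rc, stdout, stderr, changed) tuple, and rc is forced to 0 when no action was
# needed, so main() only fails on a genuine pkg_add/pkg_delete error.)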
-def package_present(name, installed_state, pkg_spec, module): - if module.check_mode: - install_cmd = 'pkg_add -Imn' - else: - install_cmd = 'pkg_add -Im' - - if installed_state is False: - - # Attempt to install the package - (rc, stdout, stderr) = execute_command("%s %s" % (install_cmd, name), module) - - # The behaviour of pkg_add is a bit different depending on whether a - # specific version is supplied or not. - # - # When a specific version is supplied the return code will be 0 when - # a package is found and 1 when it is not; if a version is not - # supplied the tool will exit 0 in both cases: - if pkg_spec['version']: - # Depend on the return code. - if debug: - syslog.syslog("package_present(): depending on return code") - if rc: - changed=False - else: - # Depend on stderr instead. - if debug: - syslog.syslog("package_present(): depending on stderr") - if stderr: - # There is a corner case where having an empty directory in - # installpath prior to the right location will result in a - # "file:/local/package/directory/ is empty" message on stderr - # while still installing the package, so we need to look for - # a message like "packagename-1.0: ok" just in case. - match = re.search("\W%s-[^:]+: ok\W" % name, stdout) - if match: - # It turns out we were able to install the package. - if debug: - syslog.syslog("package_present(): we were able to install package") - pass - else: - # We really did fail, fake the return code. - if debug: - syslog.syslog("package_present(): we really did fail") - rc = 1 - changed=False - else: - if debug: - syslog.syslog("package_present(): stderr was not set") - - if rc == 0: - if module.check_mode: - module.exit_json(changed=True) - - changed=True - - else: - rc = 0 - stdout = '' - stderr = '' - changed=False - - return (rc, stdout, stderr, changed) - -# Function used to make sure a package is the latest available version. -def package_latest(name, installed_state, pkg_spec, module): - if module.check_mode: - upgrade_cmd = 'pkg_add -umn' - else: - upgrade_cmd = 'pkg_add -um' - - pre_upgrade_name = '' - - if installed_state is True: - - # Fetch name of currently installed package. - pre_upgrade_name = get_current_name(name, pkg_spec, module) - - if debug: - syslog.syslog("package_latest(): pre_upgrade_name = %s" % pre_upgrade_name) - - # Attempt to upgrade the package. - (rc, stdout, stderr) = execute_command("%s %s" % (upgrade_cmd, name), module) - - # Look for output looking something like "nmap-6.01->6.25: ok" to see if - # something changed (or would have changed). Use \W to delimit the match - # from progress meter output. - match = re.search("\W%s->.+: ok\W" % pre_upgrade_name, stdout) - if match: - if module.check_mode: - module.exit_json(changed=True) - - changed = True - else: - changed = False - - # FIXME: This part is problematic. Based on the issues mentioned (and - # handled) in package_present() it is not safe to blindly trust stderr - # as an indicator that the command failed, and in the case with - # empty installpath directories this will break. - # - # For now keep this safeguard here, but ignore it if we managed to - # parse out a successful update above. This way we will report a - # successful run when we actually modify something but fail - # otherwise. - if changed != True: - if stderr: - rc=1 - - return (rc, stdout, stderr, changed) - - else: - # If package was not installed at all just make it present.
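# (The regex above keys on upgrade output such as "nmap-6.01->6.25: ok"; for a
# package that was never installed there is no pre-upgrade name to match, so a
# fresh install is delegated to package_present() below.)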
- if debug: - syslog.syslog("package_latest(): package is not installed, calling package_present()") - return package_present(name, installed_state, pkg_spec, module) - -# Function used to make sure a package is not installed. -def package_absent(name, installed_state, module): - if module.check_mode: - remove_cmd = 'pkg_delete -In' - else: - remove_cmd = 'pkg_delete -I' - - if installed_state is True: - - # Attempt to remove the package. - rc, stdout, stderr = execute_command("%s %s" % (remove_cmd, name), module) - - if rc == 0: - if module.check_mode: - module.exit_json(changed=True) - - changed=True - else: - changed=False - - else: - rc = 0 - stdout = '' - stderr = '' - changed=False - - return (rc, stdout, stderr, changed) - -# Function used to parse the package name based on packages-specs(7) -# The general name structure is "stem-version[-flavors]" -def parse_package_name(name, pkg_spec, module): - # Do some initial matches so we can base the more advanced regex on that. - version_match = re.search("-[0-9]", name) - versionless_match = re.search("--", name) - - # Stop if someone is giving us a name that both has a version and is - # version-less at the same time. - if version_match and versionless_match: - module.fail_json(msg="Package name both has a version and is version-less: " + name) - - # If name includes a version. - if version_match: - match = re.search("^(?P<stem>.*)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?$", name) - if match: - pkg_spec['stem'] = match.group('stem') - pkg_spec['version_separator'] = '-' - pkg_spec['version'] = match.group('version') - pkg_spec['flavor_separator'] = match.group('flavor_separator') - pkg_spec['flavor'] = match.group('flavor') - else: - module.fail_json(msg="Unable to parse package name at version_match: " + name) - - # If name includes no version but is version-less ("--"). - elif versionless_match: - match = re.search("^(?P<stem>.*)--(?P<flavor>[a-z].*)?$", name) - if match: - pkg_spec['stem'] = match.group('stem') - pkg_spec['version_separator'] = '-' - pkg_spec['version'] = None - pkg_spec['flavor_separator'] = '-' - pkg_spec['flavor'] = match.group('flavor') - else: - module.fail_json(msg="Unable to parse package name at versionless_match: " + name) - - # If name includes no version, and is not version-less, it is all a stem. - else: - match = re.search("^(?P<stem>.*)$", name) - if match: - pkg_spec['stem'] = match.group('stem') - pkg_spec['version_separator'] = None - pkg_spec['version'] = None - pkg_spec['flavor_separator'] = None - pkg_spec['flavor'] = None - else: - module.fail_json(msg="Unable to parse package name at else: " + name) - - # Sanity check that there are no trailing dashes in flavor. - # Try to stop strange stuff early so we can be strict later. - if pkg_spec['flavor']: - match = re.search("-$", pkg_spec['flavor']) - if match: - module.fail_json(msg="Trailing dash in flavor: " + pkg_spec['flavor']) - -# =========================================== -# Main control flow - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - state = dict(required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']), - ), - supports_check_mode = True - ) - - name = module.params['name'] - state = module.params['state'] - - rc = 0 - stdout = '' - stderr = '' - result = {} - result['name'] = name - result['state'] = state - - # Parse package name and put results in the pkg_spec dictionary. - pkg_spec = {} - parse_package_name(name, pkg_spec, module) - - # Get package state.
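# (At this point, name=vim--nox11 from the EXAMPLES above has parsed into a
# pkg_spec of {'stem': 'vim', 'version': None, 'flavor': 'nox11', ...}, which
# get_package_state() turns into the query "pkg_info -e vim-*-nox11".)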
- installed_state = get_package_state(name, pkg_spec, module) - - # Perform requested action. - if state in ['installed', 'present']: - (rc, stdout, stderr, changed) = package_present(name, installed_state, pkg_spec, module) - elif state in ['absent', 'removed']: - (rc, stdout, stderr, changed) = package_absent(name, installed_state, module) - elif state == 'latest': - (rc, stdout, stderr, changed) = package_latest(name, installed_state, pkg_spec, module) - - if rc != 0: - if stderr: - module.fail_json(msg=stderr) - else: - module.fail_json(msg=stdout) - - result['changed'] = changed - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/opkg b/library/packaging/opkg deleted file mode 100644 index 0187abe56a8..00000000000 --- a/library/packaging/opkg +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Patrick Pelletier -# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -DOCUMENTATION = ''' ---- -module: opkg -author: Patrick Pelletier -short_description: Package manager for OpenWrt -description: - - Manages OpenWrt packages -version_added: "1.1" -options: - name: - description: - - name of package to install/remove - required: true - state: - description: - - state of the package - choices: [ 'present', 'absent' ] - required: false - default: present - update_cache: - description: - - update the package db first - required: false - default: "no" - choices: [ "yes", "no" ] -notes: [] -''' -EXAMPLES = ''' -- opkg: name=foo state=present -- opkg: name=foo state=present update_cache=yes -- opkg: name=foo state=absent -- opkg: name=foo,bar state=absent -''' - -import pipes - -def update_package_db(module, opkg_path): - """ Updates packages list. """ - - rc, out, err = module.run_command("%s update" % opkg_path) - - if rc != 0: - module.fail_json(msg="could not update package db") - - -def query_package(module, opkg_path, name, state="present"): - """ Returns whether a package is installed or not. """ - - if state == "present": - - rc, out, err = module.run_command("%s list-installed | grep -q ^%s" % (pipes.quote(opkg_path), pipes.quote(name)), use_unsafe_shell=True) - if rc == 0: - return True - - return False - - -def remove_packages(module, opkg_path, packages): - """ Uninstalls one or more packages if installed. 
""" - - remove_c = 0 - # Using a for loop incase of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, opkg_path, package): - continue - - rc, out, err = module.run_command("%s remove %s" % (opkg_path, package)) - - if query_package(module, opkg_path, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out)) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, opkg_path, packages): - """ Installs one or more packages if not already installed. """ - - install_c = 0 - - for package in packages: - if query_package(module, opkg_path, package): - continue - - rc, out, err = module.run_command("%s install %s" % (opkg_path, package)) - - if not query_package(module, opkg_path, package): - module.fail_json(msg="failed to install %s: %s" % (package, out)) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) - - module.exit_json(changed=False, msg="package(s) already present") - - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(aliases=["pkg"], required=True), - state = dict(default="present", choices=["present", "installed", "absent", "removed"]), - update_cache = dict(default="no", aliases=["update-cache"], type='bool') - ) - ) - - opkg_path = module.get_bin_path('opkg', True, ['/bin']) - - p = module.params - - if p["update_cache"]: - update_package_db(module, opkg_path) - - pkgs = p["name"].split(",") - - if p["state"] in ["present", "installed"]: - install_packages(module, opkg_path, pkgs) - - elif p["state"] in ["absent", "removed"]: - remove_packages(module, opkg_path, pkgs) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/pacman b/library/packaging/pacman deleted file mode 100644 index 0b23a2f93ce..00000000000 --- a/library/packaging/pacman +++ /dev/null @@ -1,234 +0,0 @@ -#!/usr/bin/python -tt -# -*- coding: utf-8 -*- - -# (c) 2012, Afterburn -# (c) 2013, Aaron Bull Schaefer -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: pacman -short_description: Manage packages with I(pacman) -description: - - Manage packages with the I(pacman) package manager, which is used by - Arch Linux and its variants. -version_added: "1.0" -author: Afterburn -notes: [] -requirements: [] -options: - name: - description: - - Name of the package to install, upgrade, or remove. - required: false - default: null - - state: - description: - - Desired state of the package. 
- required: false - default: "present" - choices: ["present", "absent"] - - recurse: - description: - - When removing a package, also remove its dependencies, provided - that they are not required by other packages and were not - explicitly installed by a user. - required: false - default: "no" - choices: ["yes", "no"] - version_added: "1.3" - - update_cache: - description: - - Whether or not to refresh the master package lists. This can be - run as part of a package installation or as a separate step. - required: false - default: "no" - choices: ["yes", "no"] -''' - -EXAMPLES = ''' -# Install package foo -- pacman: name=foo state=present - -# Remove packages foo and bar -- pacman: name=foo,bar state=absent - -# Recursively remove package baz -- pacman: name=baz state=absent recurse=yes - -# Run the equivalent of "pacman -Syy" as a separate step -- pacman: update_cache=yes -''' - -import json -import shlex -import os -import re -import sys - -PACMAN_PATH = "/usr/bin/pacman" - -def query_package(module, name, state="present"): - # pacman -Q returns 0 if the package is installed, - # 1 if it is not installed - if state == "present": - cmd = "pacman -Q %s" % (name) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc == 0: - return True - - return False - - -def update_package_db(module): - cmd = "pacman -Syy" - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc == 0: - return True - else: - module.fail_json(msg="could not update package db") - - -def remove_packages(module, packages): - if module.params["recurse"]: - args = "Rs" - else: - args = "R" - - remove_c = 0 - # Using a for loop so that in case of error we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, package): - continue - - cmd = "pacman -%s %s --noconfirm" % (args, package) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc != 0: - module.fail_json(msg="failed to remove %s" % (package)) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, packages, package_files): - install_c = 0 - - for i, package in enumerate(packages): - if query_package(module, package): - continue - - if package_files[i]: - params = '-U %s' % package_files[i] - else: - params = '-S %s' % package - - cmd = "pacman %s --noconfirm" % (params) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc != 0: - module.fail_json(msg="failed to install %s" % (package)) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) - - module.exit_json(changed=False, msg="package(s) already installed") - - -def check_packages(module, packages, state): - would_be_changed = [] - for package in packages: - installed = query_package(module, package) - if ((state == "present" and not installed) or - (state == "absent" and installed)): - would_be_changed.append(package) - if would_be_changed: - if state == "absent": - state = "removed" - module.exit_json(changed=True, msg="%s package(s) would be %s" % ( - len(would_be_changed), state)) - else: - module.exit_json(changed=False, msg="package(s) already %s" % state) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(aliases=['pkg']), - state = dict(default='present', choices=['present', 'installed', 'absent',
'removed']), - recurse = dict(default='no', choices=BOOLEANS, type='bool'), - update_cache = dict(default='no', aliases=['update-cache'], choices=BOOLEANS, type='bool')), - required_one_of = [['name', 'update_cache']], - supports_check_mode = True) - - if not os.path.exists(PACMAN_PATH): - module.fail_json(msg="cannot find pacman, looking for %s" % (PACMAN_PATH)) - - p = module.params - - # normalize the state parameter - if p['state'] in ['present', 'installed']: - p['state'] = 'present' - elif p['state'] in ['absent', 'removed']: - p['state'] = 'absent' - - if p["update_cache"] and not module.check_mode: - update_package_db(module) - if not p['name']: - module.exit_json(changed=True, msg='updated the package master lists') - - if p['update_cache'] and module.check_mode and not p['name']: - module.exit_json(changed=True, msg='Would have updated the package cache') - - if p['name']: - pkgs = p['name'].split(',') - - pkg_files = [] - for i, pkg in enumerate(pkgs): - if pkg.endswith('.pkg.tar.xz'): - # The package given is a filename, extract the raw pkg name from - # it and store the filename - pkg_files.append(pkg) - pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1]) - else: - pkg_files.append(None) - - if module.check_mode: - check_packages(module, pkgs, p['state']) - - if p['state'] == 'present': - install_packages(module, pkgs, pkg_files) - elif p['state'] == 'absent': - remove_packages(module, pkgs) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/pip b/library/packaging/pip deleted file mode 100644 index 17f52c00398..00000000000 --- a/library/packaging/pip +++ /dev/null @@ -1,356 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Matt Wright -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -import tempfile -import os - -DOCUMENTATION = ''' ---- -module: pip -short_description: Manages Python library dependencies. -description: - - "Manage Python library dependencies. To use this module, one of the following keys is required: C(name) - or C(requirements)." -version_added: "0.7" -options: - name: - description: - - The name of a Python library to install or the url of the remote package. - required: false - default: null - version: - description: - - The version number to install of the Python library specified in the I(name) parameter - required: false - default: null - requirements: - description: - - The path to a pip requirements file - required: false - default: null - virtualenv: - description: - - An optional path to a I(virtualenv) directory to install into - required: false - default: null - virtualenv_site_packages: - version_added: "1.0" - description: - - Whether the virtual environment will inherit packages from the - global site-packages directory. 
Note that if this setting is - changed on an already existing virtual environment it will not - have any effect; the environment must be deleted and newly - created. - required: false - default: "no" - choices: [ "yes", "no" ] - virtualenv_command: - version_added: "1.1" - description: - - The command or a pathname to the command to create the virtual - environment with. For example C(pyvenv), C(virtualenv), - C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv). - required: false - default: virtualenv - state: - description: - - The state of the module - required: false - default: present - choices: [ "present", "absent", "latest" ] - extra_args: - description: - - Extra arguments passed to pip. - required: false - default: null - version_added: "1.0" - chdir: - description: - - cd into this directory before running the command - version_added: "1.3" - required: false - default: null - executable: - description: - - The explicit executable or a pathname to the executable to be used to - run pip for a specific version of Python installed in the system. For - example C(pip-3.3), if there are both Python 2.7 and 3.3 installations - in the system and you want to run pip for the Python 3.3 installation. - version_added: "1.3" - required: false - default: null -notes: - - Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified. -requirements: [ "virtualenv", "pip" ] -author: Matt Wright -''' - -EXAMPLES = ''' -# Install (Bottle) python package. -- pip: name=bottle - -# Install (Bottle) python package at version 0.11. -- pip: name=bottle version=0.11 - -# Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args. -- pip: name='svn+http://myrepo/svn/MyApp#egg=MyApp' - -# Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules -- pip: name=bottle virtualenv=/my_app/venv - -# Install (Bottle) into the specified (virtualenv), inheriting globally installed modules -- pip: name=bottle virtualenv=/my_app/venv virtualenv_site_packages=yes - -# Install (Bottle) into the specified (virtualenv), using Python 2.7 -- pip: name=bottle virtualenv=/my_app/venv virtualenv_command=virtualenv-2.7 - -# Install specified python requirements. -- pip: requirements=/my_app/requirements.txt - -# Install specified python requirements in indicated (virtualenv). -- pip: requirements=/my_app/requirements.txt virtualenv=/my_app/venv - -# Install specified python requirements and custom Index URL. -- pip: requirements=/my_app/requirements.txt extra_args='-i https://example.com/pypi/simple' - -# Install (Bottle) for Python 3.3 specifically, using the 'pip-3.3' executable.
-- pip: name=bottle executable=pip-3.3 -''' - -def _get_cmd_options(module, cmd): - thiscmd = cmd + " --help" - rc, stdout, stderr = module.run_command(thiscmd) - if rc != 0: - module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr)) - - words = stdout.strip().split() - cmd_options = [ x for x in words if x.startswith('--') ] - return cmd_options - - -def _get_full_name(name, version=None): - if version is None: - resp = name - else: - resp = name + '==' + version - return resp - -def _is_present(name, version, installed_pkgs): - for pkg in installed_pkgs: - if '==' not in pkg: - continue - - [pkg_name, pkg_version] = pkg.split('==') - - if pkg_name == name and (version is None or version == pkg_version): - return True - - return False - - - -def _get_pip(module, env=None, executable=None): - # On Debian and Ubuntu, pip is pip. - # On Fedora18 and up, pip is python-pip. - # On Fedora17 and below, CentOS and RedHat 6 and 5, pip is pip-python. - # On Fedora, CentOS, and RedHat, the exception is in the virtualenv. - # There, pip is just pip. - candidate_pip_basenames = ['pip', 'python-pip', 'pip-python'] - pip = None - if executable is not None: - if os.path.isabs(executable): - pip = executable - else: - # If you define your own executable that executable should be the only candidate. - candidate_pip_basenames = [executable] - if pip is None: - if env is None: - opt_dirs = [] - else: - # Try pip with the virtualenv directory first. - opt_dirs = ['%s/bin' % env] - for basename in candidate_pip_basenames: - pip = module.get_bin_path(basename, False, opt_dirs) - if pip is not None: - break - # pip should have been found by now. The final call to get_bin_path will - # trigger fail_json. - if pip is None: - basename = candidate_pip_basenames[0] - pip = module.get_bin_path(basename, True, opt_dirs) - return pip - - -def _fail(module, cmd, out, err): - msg = '' - if out: - msg += "stdout: %s" % (out, ) - if err: - msg += "\n:stderr: %s" % (err, ) - module.fail_json(cmd=cmd, msg=msg) - - -def main(): - state_map = dict( - present='install', - absent='uninstall -y', - latest='install -U', - ) - - module = AnsibleModule( - argument_spec=dict( - state=dict(default='present', choices=state_map.keys()), - name=dict(default=None, required=False), - version=dict(default=None, required=False, type='str'), - requirements=dict(default=None, required=False), - virtualenv=dict(default=None, required=False), - virtualenv_site_packages=dict(default='no', type='bool'), - virtualenv_command=dict(default='virtualenv', required=False), - use_mirrors=dict(default='yes', type='bool'), - extra_args=dict(default=None, required=False), - chdir=dict(default=None, required=False), - executable=dict(default=None, required=False), - ), - required_one_of=[['name', 'requirements']], - mutually_exclusive=[['name', 'requirements']], - supports_check_mode=True - ) - - state = module.params['state'] - name = module.params['name'] - version = module.params['version'] - requirements = module.params['requirements'] - extra_args = module.params['extra_args'] - chdir = module.params['chdir'] - - if state == 'latest' and version is not None: - module.fail_json(msg='version is incompatible with state=latest') - - err = '' - out = '' - - env = module.params['virtualenv'] - virtualenv_command = module.params['virtualenv_command'] - - if env: - env = os.path.expanduser(env) - virtualenv = os.path.expanduser(virtualenv_command) - if os.path.basename(virtualenv) == virtualenv: - virtualenv = 
module.get_bin_path(virtualenv_command, True) - if not os.path.exists(os.path.join(env, 'bin', 'activate')): - if module.check_mode: - module.exit_json(changed=True) - if module.params['virtualenv_site_packages']: - cmd = '%s --system-site-packages %s' % (virtualenv, env) - else: - cmd_opts = _get_cmd_options(module, virtualenv) - if '--no-site-packages' in cmd_opts: - cmd = '%s --no-site-packages %s' % (virtualenv, env) - else: - cmd = '%s %s' % (virtualenv, env) - this_dir = tempfile.gettempdir() - if chdir: - this_dir = os.path.join(this_dir, chdir) - rc, out_venv, err_venv = module.run_command(cmd, cwd=this_dir) - out += out_venv - err += err_venv - if rc != 0: - _fail(module, cmd, out, err) - - pip = _get_pip(module, env, module.params['executable']) - - cmd = '%s %s' % (pip, state_map[state]) - - # If there's a virtualenv we want things we install to be able to use other - # installations that exist as binaries within this virtualenv. Example: we - # install cython and then gevent -- gevent needs to use the cython binary, - # not just a python package that will be found by calling the right python. - # So if there's a virtualenv, we add that bin/ to the beginning of the PATH - # in run_command by setting path_prefix here. - path_prefix = None - if env: - path_prefix="/".join(pip.split('/')[:-1]) - - # Automatically apply -e option to extra_args when source is a VCS url. VCS - # includes those beginning with svn+, git+, hg+ or bzr+ - if name: - if name.startswith('svn+') or name.startswith('git+') or \ - name.startswith('hg+') or name.startswith('bzr+'): - args_list = [] # used if extra_args is not used at all - if extra_args: - args_list = extra_args.split(' ') - if '-e' not in args_list: - args_list.append('-e') - # Ok, we will reconstruct the option string - extra_args = ' '.join(args_list) - - if extra_args: - cmd += ' %s' % extra_args - if name: - cmd += ' %s' % _get_full_name(name, version) - elif requirements: - cmd += ' -r %s' % requirements - - this_dir = tempfile.gettempdir() - if chdir: - this_dir = os.path.join(this_dir, chdir) - - if module.check_mode: - if env or extra_args or requirements or state == 'latest' or not name: - module.exit_json(changed=True) - elif name.startswith('svn+') or name.startswith('git+') or \ - name.startswith('hg+') or name.startswith('bzr+'): - module.exit_json(changed=True) - - freeze_cmd = '%s freeze' % pip - rc, out_pip, err_pip = module.run_command(freeze_cmd, cwd=this_dir) - - if rc != 0: - module.exit_json(changed=True) - - out += out_pip - err += err_pip - - is_present = _is_present(name, version, out.split()) - - changed = (state == 'present' and not is_present) or (state == 'absent' and is_present) - module.exit_json(changed=changed, cmd=freeze_cmd, stdout=out, stderr=err) - - rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=this_dir) - out += out_pip - err += err_pip - if rc == 1 and state == 'absent' and 'not installed' in out_pip: - pass # rc is 1 when attempting to uninstall non-installed package - elif rc != 0: - _fail(module, cmd, out, err) - - if state == 'absent': - changed = 'Successfully uninstalled' in out_pip - else: - changed = 'Successfully installed' in out_pip - - module.exit_json(changed=changed, cmd=cmd, name=name, version=version, - state=state, requirements=requirements, virtualenv=env, stdout=out, stderr=err) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/pkgin b/library/packaging/pkgin deleted file mode 100755 index 
866c9f76a4c..00000000000 --- a/library/packaging/pkgin +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Shaun Zinck -# Written by Shaun Zinck -# Based on pacman module written by Afterburn -# that was based on apt module written by Matthew Williams -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - - -DOCUMENTATION = ''' ---- -module: pkgin -short_description: Package manager for SmartOS -description: - - Manages SmartOS packages -version_added: "1.0" -options: - name: - description: - - name of package to install/remove - required: true - state: - description: - - state of the package - choices: [ 'present', 'absent' ] - required: false - default: present -author: Shaun Zinck -notes: [] -''' - -EXAMPLES = ''' -# install package foo -- pkgin: name=foo state=present - -# remove package foo -- pkgin: name=foo state=absent - -# remove packages foo and bar -- pkgin: name=foo,bar state=absent -''' - - -import json -import shlex -import os -import sys -import pipes - -def query_package(module, pkgin_path, name, state="present"): - - if state == "present": - - rc, out, err = module.run_command("%s -y list | grep ^%s" % (pipes.quote(pkgin_path), pipes.quote(name)), use_unsafe_shell=True) - - if rc == 0: - # At least one package with a package name that starts with ``name`` - # is installed. For some cases this is not sufficient to determine - # whether the queried package is installed. - # - # E.g. for ``name='gcc47'``, with ``gcc47`` not installed but - # ``gcc47-libs`` installed, ``out`` would be: - # - # gcc47-libs-4.7.2nb4 The GNU Compiler Collection (GCC) support shared libraries. - # - # Multiline output is also possible, for example with the same query - # and both ``gcc47`` and ``gcc47-libs`` being installed: - # - # gcc47-libs-4.7.2nb4 The GNU Compiler Collection (GCC) support shared libraries. - # gcc47-4.7.2nb3 The GNU Compiler Collection (GCC) - 4.7 Release Series - - # Loop over lines in ``out`` - for line in out.split('\n'): - - # Strip description - # (results in something
like 'gcc47-libs-4.7.2nb4') - pkgname_with_version = line.split(' ')[0] - - # Strip version - # (results in something like 'gcc47-libs') - pkgname_without_version = '-'.join(pkgname_with_version.split('-')[:-1]) - - if name == pkgname_without_version: - return True - - return False - - -def remove_packages(module, pkgin_path, packages): - - remove_c = 0 - # Using a for loop so that in case of error we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, pkgin_path, package): - continue - - rc, out, err = module.run_command("%s -y remove %s" % (pkgin_path, package)) - - if query_package(module, pkgin_path, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out)) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, pkgin_path, packages): - - install_c = 0 - - for package in packages: - if query_package(module, pkgin_path, package): - continue - - rc, out, err = module.run_command("%s -y install %s" % (pkgin_path, package)) - - if not query_package(module, pkgin_path, package): - module.fail_json(msg="failed to install %s: %s" % (package, out)) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) - - module.exit_json(changed=False, msg="package(s) already present") - - - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(default="present", choices=["present","absent"]), - name = dict(aliases=["pkg"], required=True))) - - pkgin_path = module.get_bin_path('pkgin', True, ['/opt/local/bin']) - - p = module.params - - pkgs = p["name"].split(",") - - if p["state"] == "present": - install_packages(module, pkgin_path, pkgs) - - elif p["state"] == "absent": - remove_packages(module, pkgin_path, pkgs) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/pkgng b/library/packaging/pkgng deleted file mode 100644 index a1f443fd4e1..00000000000 --- a/library/packaging/pkgng +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, bleader -# Written by bleader -# Based on pkgin module written by Shaun Zinck -# that was based on pacman module written by Afterburn -# that was based on apt module written by Matthew Williams -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - - -DOCUMENTATION = ''' ---- -module: pkgng -short_description: Package manager for FreeBSD >= 9.0 -description: - - Manage binary packages for FreeBSD using 'pkgng' which - is available in versions after 9.0.
-version_added: "1.2" -options: - name: - description: - - name of package to install/remove - required: true - state: - description: - - state of the package - choices: [ 'present', 'absent' ] - required: false - default: present - cached: - description: - - use local package base or try to fetch an updated one - choices: [ 'yes', 'no' ] - required: false - default: no - annotation: - description: - - a comma-separated list of keyvalue-pairs of the form - <+/-/:>[=]. A '+' denotes adding an annotation, a - '-' denotes removing an annotation, and ':' denotes modifying an - annotation. - If setting or modifying annotations, a value must be provided. - required: false - version_added: "1.6" - pkgsite: - description: - - for pkgng versions before 1.1.4, specify packagesite to use - for downloading packages, if not specified, use settings from - /usr/local/etc/pkg.conf - for newer pkgng versions, specify a the name of a repository - configured in /usr/local/etc/pkg/repos - required: false -author: bleader -notes: - - When using pkgsite, be careful that already in cache packages won't be downloaded again. -''' - -EXAMPLES = ''' -# Install package foo -- pkgng: name=foo state=present - -# Annotate package foo and bar -- pkgng: name=foo,bar annotation=+test1=baz,-test2,:test3=foobar - -# Remove packages foo and bar -- pkgng: name=foo,bar state=absent -''' - - -import json -import shlex -import os -import re -import sys - -def query_package(module, pkgng_path, name): - - rc, out, err = module.run_command("%s info -g -e %s" % (pkgng_path, name)) - - if rc == 0: - return True - - return False - -def pkgng_older_than(module, pkgng_path, compare_version): - - rc, out, err = module.run_command("%s -v" % pkgng_path) - version = map(lambda x: int(x), re.split(r'[\._]', out)) - - i = 0 - new_pkgng = True - while compare_version[i] == version[i]: - i += 1 - if i == min(len(compare_version), len(version)): - break - else: - if compare_version[i] > version[i]: - new_pkgng = False - return not new_pkgng - - -def remove_packages(module, pkgng_path, packages): - - remove_c = 0 - # Using a for loop incase of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, pkgng_path, package): - continue - - if not module.check_mode: - rc, out, err = module.run_command("%s delete -y %s" % (pkgng_path, package)) - - if not module.check_mode and query_package(module, pkgng_path, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out)) - - remove_c += 1 - - if remove_c > 0: - - return (True, "removed %s package(s)" % remove_c) - - return (False, "package(s) already absent") - - -def install_packages(module, pkgng_path, packages, cached, pkgsite): - - install_c = 0 - - # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions - # in /usr/local/etc/pkg/repos - old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4]) - if pkgsite != "": - if old_pkgng: - pkgsite = "PACKAGESITE=%s" % (pkgsite) - else: - pkgsite = "-r %s" % (pkgsite) - - if not module.check_mode and not cached: - if old_pkgng: - rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path)) - else: - rc, out, err = module.run_command("%s update" % (pkgng_path)) - if rc != 0: - module.fail_json(msg="Could not update catalogue") - - for package in packages: - if query_package(module, pkgng_path, package): - continue - - if not module.check_mode: - if old_pkgng: - rc, out, err = module.run_command("%s 
%s install -g -U -y %s" % (pkgsite, pkgng_path, package)) - else: - rc, out, err = module.run_command("%s install %s -g -U -y %s" % (pkgng_path, pkgsite, package)) - - if not module.check_mode and not query_package(module, pkgng_path, package): - module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err) - - install_c += 1 - - if install_c > 0: - return (True, "added %s package(s)" % (install_c)) - - return (False, "package(s) already present") - -def annotation_query(module, pkgng_path, package, tag): - rc, out, err = module.run_command("%s info -g -A %s" % (pkgng_path, package)) - match = re.search(r'^\s*(?P%s)\s*:\s*(?P\w+)' % tag, out, flags=re.MULTILINE) - if match: - return match.group('value') - return False - - -def annotation_add(module, pkgng_path, package, tag, value): - _value = annotation_query(module, pkgng_path, package, tag) - if not _value: - # Annotation does not exist, add it. - rc, out, err = module.run_command('%s annotate -y -A %s %s "%s"' - % (pkgng_path, package, tag, value)) - if rc != 0: - module.fail_json("could not annotate %s: %s" - % (package, out), stderr=err) - return True - elif _value != value: - # Annotation exists, but value differs - module.fail_json( - mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s" - % (package, tag, _value, value)) - return False - else: - # Annotation exists, nothing to do - return False - -def annotation_delete(module, pkgng_path, package, tag, value): - _value = annotation_query(module, pkgng_path, package, tag) - if _value: - rc, out, err = module.run_command('%s annotate -y -D %s %s' - % (pkgng_path, package, tag)) - if rc != 0: - module.fail_json("could not delete annotation to %s: %s" - % (package, out), stderr=err) - return True - return False - -def annotation_modify(module, pkgng_path, package, tag, value): - _value = annotation_query(module, pkgng_path, package, tag) - if not value: - # No such tag - module.fail_json("could not change annotation to %s: tag %s does not exist" - % (package, tag)) - elif _value == value: - # No change in value - return False - else: - rc,out,err = module.run_command('%s annotate -y -M %s %s "%s"' - % (pkgng_path, package, tag, value)) - if rc != 0: - module.fail_json("could not change annotation annotation to %s: %s" - % (package, out), stderr=err) - return True - - -def annotate_packages(module, pkgng_path, packages, annotation): - annotate_c = 0 - annotations = map(lambda _annotation: - re.match(r'(?P[\+-:])(?P\w+)(=(?P\w+))?', - _annotation).groupdict(), - re.split(r',', annotation)) - - operation = { - '+': annotation_add, - '-': annotation_delete, - ':': annotation_modify - } - - for package in packages: - for _annotation in annotations: - annotate_c += ( 1 if operation[_annotation['operation']]( - module, pkgng_path, package, - _annotation['tag'], _annotation['value']) else 0 ) - - if annotate_c > 0: - return (True, "added %s annotations." 
-
-def main():
-    module = AnsibleModule(
-        argument_spec = dict(
-            state = dict(default="present", choices=["present","absent"], required=False),
-            name = dict(aliases=["pkg"], required=True),
-            cached = dict(default=False, type='bool'),
-            annotation = dict(default="", required=False),
-            pkgsite = dict(default="", required=False)),
-        supports_check_mode = True)
-
-    pkgng_path = module.get_bin_path('pkg', True)
-
-    p = module.params
-
-    pkgs = p["name"].split(",")
-
-    changed = False
-    msgs = []
-
-    if p["state"] == "present":
-        _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"])
-        changed = changed or _changed
-        msgs.append(_msg)
-
-    elif p["state"] == "absent":
-        _changed, _msg = remove_packages(module, pkgng_path, pkgs)
-        changed = changed or _changed
-        msgs.append(_msg)
-
-    if p["annotation"]:
-        _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"])
-        changed = changed or _changed
-        msgs.append(_msg)
-
-    module.exit_json(changed=changed, msg=", ".join(msgs))
-
-
-
-# import module snippets
-from ansible.module_utils.basic import *
-
-main()
diff --git a/library/packaging/pkgutil b/library/packaging/pkgutil
deleted file mode 100644
index 78a7db72bf5..00000000000
--- a/library/packaging/pkgutil
+++ /dev/null
@@ -1,179 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2013, Alexander Winkler
-#     based on svr4pkg by
-#     Boyd Adamson (2012)
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-DOCUMENTATION = '''
----
-module: pkgutil
-short_description: Manage CSW packages on Solaris
-description:
-    - Manages CSW packages (SVR4 format) on Solaris 10 and 11.
-    - These were the native packages on Solaris <= 10 and are available
-      as a legacy feature in Solaris 11.
-    - Pkgutil is an advanced packaging system, which resolves dependencies
-      on installation. It is designed for CSW packages.
-version_added: "1.3"
-author: Alexander Winkler
-options:
-    name:
-        description:
-            - Package name, e.g. (C(CSWnrpe))
-        required: true
-    site:
-        description:
-            - Specifies the repository path to install the package from.
-            - Its global definition is done in C(/etc/opt/csw/pkgutil.conf).
-    state:
-        description:
-            - Whether to install (C(present)), or remove (C(absent)) a package.
-            - The upgrade (C(latest)) operation will update/install the package to the latest version available.
-            - "Note: The module has a limitation that (C(latest)) only works for one package, not lists of them."
-        required: true
-        choices: ["present", "absent", "latest"]
-'''
-
-EXAMPLES = '''
-# Install a package
-pkgutil: name=CSWcommon state=present
-
-# Install a package from a specific repository
-pkgutil: name=CSWnrpe site='ftp://myinternal.repo/opencsw/kiel' state=latest
-'''
-
-import os
-import pipes
-
-def package_installed(module, name):
-    cmd = [module.get_bin_path('pkginfo', True)]
-    cmd.append('-q')
-    cmd.append(name)
-    rc, out, err = module.run_command(' '.join(cmd))
-    if rc == 0:
-        return True
-    else:
-        return False
-
-def package_latest(module, name, site):
-    # Only supports one package
-    cmd = [ 'pkgutil', '--single', '-c' ]
-    if site is not None:
-        cmd += [ '-t', pipes.quote(site) ]
-    cmd.append(pipes.quote(name))
-    cmd += [ '| tail -1 | grep -v SAME' ]
-    rc, out, err = module.run_command(' '.join(cmd), use_unsafe_shell=True)
-    if rc == 1:
-        return True
-    else:
-        return False
-
-def run_command(module, cmd):
-    progname = cmd[0]
-    cmd[0] = module.get_bin_path(progname, True)
-    return module.run_command(cmd)
-
-def package_install(module, state, name, site):
-    cmd = [ 'pkgutil', '-iy' ]
-    if site is not None:
-        cmd += [ '-t', site ]
-    if state == 'latest':
-        cmd += [ '-f' ]
-    cmd.append(name)
-    (rc, out, err) = run_command(module, cmd)
-    return (rc, out, err)
-
-def package_upgrade(module, name, site):
-    cmd = [ 'pkgutil', '-ufy' ]
-    if site is not None:
-        cmd += [ '-t', site ]
-    cmd.append(name)
-    (rc, out, err) = run_command(module, cmd)
-    return (rc, out, err)
-
-def package_uninstall(module, name):
-    cmd = [ 'pkgutil', '-ry', name]
-    (rc, out, err) = run_command(module, cmd)
-    return (rc, out, err)
-
-def main():
-    module = AnsibleModule(
-        argument_spec = dict(
-            name = dict(required = True),
-            state = dict(required = True, choices=['present', 'absent','latest']),
-            site = dict(default = None),
-        ),
-        supports_check_mode=True
-    )
-    name = module.params['name']
-    state = module.params['state']
-    site = module.params['site']
-    rc = None
-    out = ''
-    err = ''
-    result = {}
-    result['name'] = name
-    result['state'] = state
-
-    if state == 'present':
-        if not package_installed(module, name):
-            if module.check_mode:
-                module.exit_json(changed=True)
-            (rc, out, err) = package_install(module, state, name, site)
-            # Stdout is normally empty but for some packages can be
-            # very long and is not often useful
-            if len(out) > 75:
-                out = out[:75] + '...'
-
-    elif state == 'latest':
-        if not package_installed(module, name):
-            if module.check_mode:
-                module.exit_json(changed=True)
-            (rc, out, err) = package_install(module, state, name, site)
-        else:
-            if not package_latest(module, name, site):
-                if module.check_mode:
-                    module.exit_json(changed=True)
-                (rc, out, err) = package_upgrade(module, name, site)
-                if len(out) > 75:
-                    out = out[:75] + '...'
- - elif state == 'absent': - if package_installed(module, name): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = package_uninstall(module, name) - out = out[:75] - - if rc is None: - result['changed'] = False - else: - result['changed'] = True - - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/portage b/library/packaging/portage deleted file mode 100644 index 85027bfc79b..00000000000 --- a/library/packaging/portage +++ /dev/null @@ -1,405 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Yap Sok Ann -# Written by Yap Sok Ann -# Based on apt module written by Matthew Williams -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - - -DOCUMENTATION = ''' ---- -module: portage -short_description: Package manager for Gentoo -description: - - Manages Gentoo packages -version_added: "1.6" - -options: - package: - description: - - Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world) - required: false - default: null - - state: - description: - - State of the package atom - required: false - default: "present" - choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged" ] - - update: - description: - - Update packages to the best version available (--update) - required: false - default: null - choices: [ "yes" ] - - deep: - description: - - Consider the entire dependency tree of packages (--deep) - required: false - default: null - choices: [ "yes" ] - - newuse: - description: - - Include installed packages where USE flags have changed (--newuse) - required: false - default: null - choices: [ "yes" ] - - changed_use: - description: - - Include installed packages where USE flags have changed, except when - - flags that the user has not enabled are added or removed - - (--changed-use) - required: false - default: null - choices: [ "yes" ] - version_added: 1.8 - - oneshot: - description: - - Do not add the packages to the world file (--oneshot) - required: false - default: null - choices: [ "yes" ] - - noreplace: - description: - - Do not re-emerge installed packages (--noreplace) - required: false - default: null - choices: [ "yes" ] - - nodeps: - description: - - Only merge packages but not their dependencies (--nodeps) - required: false - default: null - choices: [ "yes" ] - - onlydeps: - description: - - Only merge packages' dependencies but not the packages (--onlydeps) - required: false - default: null - choices: [ "yes" ] - - depclean: - description: - - Remove packages not needed by explicitly merged packages (--depclean) - - If no package is specified, clean up the world's dependencies - - Otherwise, --depclean serves as a dependency aware version of --unmerge - required: false - default: null - choices: [ "yes" ] - - quiet: - description: - - Run emerge in quiet mode (--quiet) - required: false - 
default: null - choices: [ "yes" ] - - verbose: - description: - - Run emerge in verbose mode (--verbose) - required: false - default: null - choices: [ "yes" ] - - sync: - description: - - Sync package repositories first - - If yes, perform "emerge --sync" - - If web, perform "emerge-webrsync" - required: false - default: null - choices: [ "yes", "web" ] - -requirements: [ gentoolkit ] -author: Yap Sok Ann -notes: [] -''' - -EXAMPLES = ''' -# Make sure package foo is installed -- portage: package=foo state=present - -# Make sure package foo is not installed -- portage: package=foo state=absent - -# Update package foo to the "best" version -- portage: package=foo update=yes - -# Sync repositories and update world -- portage: package=@world update=yes deep=yes sync=yes - -# Remove unneeded packages -- portage: depclean=yes - -# Remove package foo if it is not explicitly needed -- portage: package=foo state=absent depclean=yes -''' - - -import os -import pipes - - -def query_package(module, package, action): - if package.startswith('@'): - return query_set(module, package, action) - return query_atom(module, package, action) - - -def query_atom(module, atom, action): - cmd = '%s list %s' % (module.equery_path, atom) - - rc, out, err = module.run_command(cmd) - return rc == 0 - - -def query_set(module, set, action): - system_sets = [ - '@live-rebuild', - '@module-rebuild', - '@preserved-rebuild', - '@security', - '@selected', - '@system', - '@world', - '@x11-module-rebuild', - ] - - if set in system_sets: - if action == 'unmerge': - module.fail_json(msg='set %s cannot be removed' % set) - return False - - world_sets_path = '/var/lib/portage/world_sets' - if not os.path.exists(world_sets_path): - return False - - cmd = 'grep %s %s' % (set, world_sets_path) - - rc, out, err = module.run_command(cmd) - return rc == 0 - - -def sync_repositories(module, webrsync=False): - if module.check_mode: - module.exit_json(msg='check mode not supported by sync') - - if webrsync: - webrsync_path = module.get_bin_path('emerge-webrsync', required=True) - cmd = '%s --quiet' % webrsync_path - else: - cmd = '%s --sync --quiet' % module.emerge_path - - rc, out, err = module.run_command(cmd) - if rc != 0: - module.fail_json(msg='could not sync package repositories') - - -# Note: In the 3 functions below, equery is done one-by-one, but emerge is done -# in one go. If that is not desirable, split the packages into multiple tasks -# instead of joining them together with comma. 
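An editorial aside on the note above: the three functions that follow all lean on Python's for/else idiom, in which the else branch runs only when the loop completes without hitting break. A minimal standalone illustration of the early-exit check used by emerge_packages (the names here are illustrative, not from the module):

    def any_missing(packages, installed):
        for package in packages:
            if package not in installed:
                break  # at least one package still has to be emerged
        else:
            return False  # loop finished without break: all already present
        return True

    assert any_missing(['foo', 'bar'], installed=set(['foo']))
    assert not any_missing(['foo'], installed=set(['foo']))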
- - -def emerge_packages(module, packages): - p = module.params - - if not (p['update'] or p['noreplace']): - for package in packages: - if not query_package(module, package, 'emerge'): - break - else: - module.exit_json(changed=False, msg='Packages already present.') - - args = [] - emerge_flags = { - 'update': '--update', - 'deep': '--deep', - 'newuse': '--newuse', - 'changed_use': '--changed-use', - 'oneshot': '--oneshot', - 'noreplace': '--noreplace', - 'nodeps': '--nodeps', - 'onlydeps': '--onlydeps', - 'quiet': '--quiet', - 'verbose': '--verbose', - } - for flag, arg in emerge_flags.iteritems(): - if p[flag]: - args.append(arg) - - cmd, (rc, out, err) = run_emerge(module, packages, *args) - if rc != 0: - module.fail_json( - cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages not installed.', - ) - - changed = True - for line in out.splitlines(): - if line.startswith('>>> Emerging (1 of'): - break - else: - changed = False - - module.exit_json( - changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages installed.', - ) - - -def unmerge_packages(module, packages): - p = module.params - - for package in packages: - if query_package(module, package, 'unmerge'): - break - else: - module.exit_json(changed=False, msg='Packages already absent.') - - args = ['--unmerge'] - - for flag in ['quiet', 'verbose']: - if p[flag]: - args.append('--%s' % flag) - - cmd, (rc, out, err) = run_emerge(module, packages, *args) - - if rc != 0: - module.fail_json( - cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages not removed.', - ) - - module.exit_json( - changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages removed.', - ) - - -def cleanup_packages(module, packages): - p = module.params - - if packages: - for package in packages: - if query_package(module, package, 'unmerge'): - break - else: - module.exit_json(changed=False, msg='Packages already absent.') - - args = ['--depclean'] - - for flag in ['quiet', 'verbose']: - if p[flag]: - args.append('--%s' % flag) - - cmd, (rc, out, err) = run_emerge(module, packages, *args) - if rc != 0: - module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err) - - removed = 0 - for line in out.splitlines(): - if not line.startswith('Number removed:'): - continue - parts = line.split(':') - removed = int(parts[1].strip()) - changed = removed > 0 - - module.exit_json( - changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Depclean completed.', - ) - - -def run_emerge(module, packages, *args): - args = list(args) - - if module.check_mode: - args.append('--pretend') - - cmd = [module.emerge_path] + args + packages - return cmd, module.run_command(cmd) - - -portage_present_states = ['present', 'emerged', 'installed'] -portage_absent_states = ['absent', 'unmerged', 'removed'] - - -def main(): - module = AnsibleModule( - argument_spec=dict( - package=dict(default=None, aliases=['name']), - state=dict( - default=portage_present_states[0], - choices=portage_present_states + portage_absent_states, - ), - update=dict(default=None, choices=['yes']), - deep=dict(default=None, choices=['yes']), - newuse=dict(default=None, choices=['yes']), - changed_use=dict(default=None, choices=['yes']), - oneshot=dict(default=None, choices=['yes']), - noreplace=dict(default=None, choices=['yes']), - nodeps=dict(default=None, choices=['yes']), - onlydeps=dict(default=None, choices=['yes']), - depclean=dict(default=None, choices=['yes']), - quiet=dict(default=None, choices=['yes']), - verbose=dict(default=None, choices=['yes']), - 
sync=dict(default=None, choices=['yes', 'web']), - ), - required_one_of=[['package', 'sync', 'depclean']], - mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']], - supports_check_mode=True, - ) - - module.emerge_path = module.get_bin_path('emerge', required=True) - module.equery_path = module.get_bin_path('equery', required=True) - - p = module.params - - if p['sync']: - sync_repositories(module, webrsync=(p['sync'] == 'web')) - if not p['package']: - module.exit_json(msg='Sync successfully finished.') - - packages = p['package'].split(',') if p['package'] else [] - - if p['depclean']: - if packages and p['state'] not in portage_absent_states: - module.fail_json( - msg='Depclean can only be used with package when the state is ' - 'one of: %s' % portage_absent_states, - ) - - cleanup_packages(module, packages) - - elif p['state'] in portage_present_states: - emerge_packages(module, packages) - - elif p['state'] in portage_absent_states: - unmerge_packages(module, packages) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/portinstall b/library/packaging/portinstall deleted file mode 100644 index 068f413af72..00000000000 --- a/library/packaging/portinstall +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, berenddeboer -# Written by berenddeboer -# Based on pkgng module written by bleader -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - - -DOCUMENTATION = ''' ---- -module: portinstall -short_description: Installing packages from FreeBSD's ports system -description: - - Manage packages for FreeBSD using 'portinstall'. 
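Stepping back to the portage main() just above: it delegates parameter validation to AnsibleModule's required_one_of and mutually_exclusive options. The core of that validation amounts to set logic along the following lines; this is an illustrative sketch only, the real implementation lives in ansible.module_utils.basic:

    def check_spec(params, required_one_of, mutually_exclusive):
        supplied = set(k for k, v in params.items() if v)
        for group in required_one_of:
            if not supplied.intersection(group):
                raise ValueError('one of %s is required' % '|'.join(group))
        for group in mutually_exclusive:
            clash = supplied.intersection(group)
            if len(clash) > 1:
                raise ValueError('mutually exclusive: %s' % '|'.join(sorted(clash)))

    # mirrors the spec used by the portage module above
    check_spec({'package': 'foo', 'quiet': 'yes'},
               [['package', 'sync', 'depclean']],
               [['nodeps', 'onlydeps'], ['quiet', 'verbose']])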
-version_added: "1.3" -options: - name: - description: - - name of package to install/remove - required: true - state: - description: - - state of the package - choices: [ 'present', 'absent' ] - required: false - default: present - use_packages: - description: - - use packages instead of ports whenever available - choices: [ 'yes', 'no' ] - required: false - default: yes -author: berenddeboer -''' - -EXAMPLES = ''' -# Install package foo -- portinstall: name=foo state=present - -# Install package security/cyrus-sasl2-saslauthd -- portinstall: name=security/cyrus-sasl2-saslauthd state=present - -# Remove packages foo and bar -- portinstall: name=foo,bar state=absent -''' - - -import json -import shlex -import os -import sys - -def query_package(module, name): - - pkg_info_path = module.get_bin_path('pkg_info', False) - - # Assume that if we have pkg_info, we haven't upgraded to pkgng - if pkg_info_path: - pkgng = False - pkg_glob_path = module.get_bin_path('pkg_glob', True) - rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, pipes.quote(name)), use_unsafe_shell=True) - else: - pkgng = True - pkg_info_path = module.get_bin_path('pkg', True) - pkg_info_path = pkg_info_path + " info" - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name)) - - found = rc == 0 - - if not found: - # databases/mysql55-client installs as mysql-client, so try solving - # that the ugly way. Pity FreeBSD doesn't have a fool proof way of checking - # some package is installed - name_without_digits = re.sub('[0-9]', '', name) - if name != name_without_digits: - if pkgng: - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits)) - else: - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits)) - - found = rc == 0 - - return found - - -def matching_packages(module, name): - - ports_glob_path = module.get_bin_path('ports_glob', True) - rc, out, err = module.run_command("%s %s" % (ports_glob_path, name)) - #counts the numer of packages found - occurrences = out.count('\n') - if occurrences == 0: - name_without_digits = re.sub('[0-9]', '', name) - if name != name_without_digits: - rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits)) - occurrences = out.count('\n') - return occurrences - - -def remove_packages(module, packages): - - remove_c = 0 - pkg_glob_path = module.get_bin_path('pkg_glob', True) - - # If pkg_delete not found, we assume pkgng - pkg_delete_path = module.get_bin_path('pkg_delete', False) - if not pkg_delete_path: - pkg_delete_path = module.get_bin_path('pkg', True) - pkg_delete_path = pkg_delete_path + " delete -y" - - # Using a for loop incase of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, package): - continue - - rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(package)), use_unsafe_shell=True) - - if query_package(module, package): - name_without_digits = re.sub('[0-9]', '', package) - rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(name_without_digits)),use_unsafe_shell=True) - if query_package(module, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out)) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def 
-def install_packages(module, packages, use_packages):
-
-    install_c = 0
-
-    # If portinstall not found, automagically install
-    portinstall_path = module.get_bin_path('portinstall', False)
-    if not portinstall_path:
-        pkg_path = module.get_bin_path('pkg', False)
-        if pkg_path:
-            module.run_command("pkg install -y portupgrade")
-        portinstall_path = module.get_bin_path('portinstall', True)
-
-    if use_packages:
-        portinstall_params = "--use-packages"
-    else:
-        portinstall_params = ""
-
-    for package in packages:
-        if query_package(module, package):
-            continue
-
-        # TODO: check how many match
-        matches = matching_packages(module, package)
-        if matches == 1:
-            rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package))
-            if not query_package(module, package):
-                module.fail_json(msg="failed to install %s: %s" % (package, out))
-        elif matches == 0:
-            module.fail_json(msg="no matches for package %s" % (package))
-        else:
-            module.fail_json(msg="%s matches found for package name %s" % (matches, package))
-
-        install_c += 1
-
-    if install_c > 0:
-        module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
-
-    module.exit_json(changed=False, msg="package(s) already present")
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec = dict(
-            state = dict(default="present", choices=["present","absent"]),
-            name = dict(aliases=["pkg"], required=True),
-            use_packages = dict(type='bool', default='yes')))
-
-    p = module.params
-
-    pkgs = p["name"].split(",")
-
-    if p["state"] == "present":
-        install_packages(module, pkgs, p["use_packages"])
-
-    elif p["state"] == "absent":
-        remove_packages(module, pkgs)
-
-# import module snippets
-from ansible.module_utils.basic import *
-
-main()
diff --git a/library/packaging/rhn_channel b/library/packaging/rhn_channel
deleted file mode 100644
index 05a155f7ca1..00000000000
--- a/library/packaging/rhn_channel
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/usr/bin/python
-
-# (c) Vincent Van de Kussen
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-module: rhn_channel
-short_description: Adds or removes Red Hat software channels
-description:
-    - Adds or removes Red Hat software channels
-version_added: "1.1"
-author: Vincent Van der Kussen
-notes:
-    - this module fetches the system id from RHN.
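A hedged editorial sketch of the XML-RPC session pattern this module uses below: connect, log in to obtain a session token, pass that token as the first argument of every call, then log out. The endpoint, credentials and system id here are placeholders. Note that the sketch releases the session in a finally block, whereas the module's own main() exits via exit_json before its logout call can ever be reached:

    import xmlrpclib

    def list_system_channels(url, user, password, sys_id):
        client = xmlrpclib.Server(url, verbose=0)
        session = client.auth.login(user, password)
        try:
            # every RHN API call takes the session token as its first argument
            return client.channel.software.listSystemChannels(session, sys_id)
        finally:
            client.auth.logout(session)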
-requirements: - - none -options: - name: - description: - - name of the software channel - required: true - default: null - sysname: - description: - - name of the system as it is known in RHN/Satellite - required: true - default: null - state: - description: - - whether the channel should be present or not - required: false - default: present - url: - description: - - The full url to the RHN/Satellite api - required: true - user: - description: - - RHN/Satellite user - required: true - password: - description: - - "the user's password" - required: true -''' - -EXAMPLES = ''' -- rhn_channel: name=rhel-x86_64-server-v2vwin-6 sysname=server01 url=https://rhn.redhat.com/rpc/api user=rhnuser password=guessme -''' - -import xmlrpclib -from operator import itemgetter -import re - - -# ------------------------------------------------------- # - -def get_systemid(client, session, sysname): - systems = client.system.listUserSystems(session) - for system in systems: - if system.get('name') == sysname: - idres = system.get('id') - idd = int(idres) - return idd - -# ------------------------------------------------------- # - -# unused: -# -#def get_localsystemid(): -# f = open("/etc/sysconfig/rhn/systemid", "r") -# content = f.read() -# loc_id = re.search(r'\b(ID-)(\d{10})' ,content) -# return loc_id.group(2) - -# ------------------------------------------------------- # - -def subscribe_channels(channels, client, session, sysname, sys_id): - c = base_channels(client, session, sys_id) - c.append(channels) - return client.channel.software.setSystemChannels(session, sys_id, c) - -# ------------------------------------------------------- # - -def unsubscribe_channels(channels, client, session, sysname, sys_id): - c = base_channels(client, session, sys_id) - c.remove(channels) - return client.channel.software.setSystemChannels(session, sys_id, c) - -# ------------------------------------------------------- # - -def base_channels(client, session, sys_id): - basechan = client.channel.software.listSystemChannels(session, sys_id) - try: - chans = [item['label'] for item in basechan] - except KeyError: - chans = [item['channel_label'] for item in basechan] - return chans - -# ------------------------------------------------------- # - - -def main(): - - module = AnsibleModule( - argument_spec = dict( - state = dict(default='present', choices=['present', 'absent']), - name = dict(required=True), - sysname = dict(required=True), - url = dict(required=True), - user = dict(required=True), - password = dict(required=True, aliases=['pwd']), - ) -# supports_check_mode=True - ) - - state = module.params['state'] - channelname = module.params['name'] - systname = module.params['sysname'] - saturl = module.params['url'] - user = module.params['user'] - password = module.params['password'] - - #initialize connection - client = xmlrpclib.Server(saturl, verbose=0) - session = client.auth.login(user, password) - - # get systemid - sys_id = get_systemid(client, session, systname) - - # get channels for system - chans = base_channels(client, session, sys_id) - - - if state == 'present': - if channelname in chans: - module.exit_json(changed=False, msg="Channel %s already exists" % channelname) - else: - subscribe_channels(channelname, client, session, systname, sys_id) - module.exit_json(changed=True, msg="Channel %s added" % channelname) - - if state == 'absent': - if not channelname in chans: - module.exit_json(changed=False, msg="Not subscribed to channel %s." 
% channelname) - else: - unsubscribe_channels(channelname, client, session, systname, sys_id) - module.exit_json(changed=True, msg="Channel %s removed" % channelname) - - client.auth.logout(session) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/rhn_register b/library/packaging/rhn_register deleted file mode 100644 index 1e92405c827..00000000000 --- a/library/packaging/rhn_register +++ /dev/null @@ -1,336 +0,0 @@ -#!/usr/bin/python - -# (c) James Laska -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rhn_register -short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command -description: - - Manage registration to the Red Hat Network. -version_added: "1.2" -author: James Laska -notes: - - In order to register a system, rhnreg_ks requires either a username and password, or an activationkey. -requirements: - - rhnreg_ks -options: - state: - description: - - whether to register (C(present)), or unregister (C(absent)) a system - required: false - choices: [ "present", "absent" ] - default: "present" - username: - description: - - Red Hat Network username - required: False - default: null - password: - description: - - Red Hat Network password - required: False - default: null - server_url: - description: - - Specify an alternative Red Hat Network server URL - required: False - default: Current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date) is the default - activationkey: - description: - - supply an activation key for use with registration - required: False - default: null - channels: - description: - - Optionally specify a list of comma-separated channels to subscribe to upon successful registration. - required: false - default: [] -''' - -EXAMPLES = ''' -# Unregister system from RHN. -- rhn_register: state=absent username=joe_user password=somepass - -# Register as user (joe_user) with password (somepass) and auto-subscribe to available content. -- rhn_register: state=present username=joe_user password=somepass - -# Register with activationkey (1-222333444) and enable extended update support. -- rhn_register: state=present activationkey=1-222333444 enable_eus=true - -# Register as user (joe_user) with password (somepass) against a satellite -# server specified by (server_url). -- rhn_register: > - state=present - username=joe_user - password=somepass - server_url=https://xmlrpc.my.satellite/XMLRPC - -# Register as user (joe_user) with password (somepass) and enable -# channels (rhel-x86_64-server-6-foo-1) and (rhel-x86_64-server-6-bar-1). 
-- rhn_register: state=present username=joe_user - password=somepass - channels=rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1 -''' - -import sys -import types -import xmlrpclib -import urlparse - -# Attempt to import rhn client tools -sys.path.insert(0, '/usr/share/rhn') -try: - import up2date_client - import up2date_client.config -except ImportError, e: - module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?\n%s" % e) - -# INSERT REDHAT SNIPPETS -from ansible.module_utils.redhat import * -# INSERT COMMON SNIPPETS -from ansible.module_utils.basic import * - -class Rhn(RegistrationBase): - - def __init__(self, username=None, password=None): - RegistrationBase.__init__(self, username, password) - self.config = self.load_config() - - def load_config(self): - ''' - Read configuration from /etc/sysconfig/rhn/up2date - ''' - self.config = up2date_client.config.initUp2dateConfig() - - # Add support for specifying a default value w/o having to standup some - # configuration. Yeah, I know this should be subclassed ... but, oh - # well - def get_option_default(self, key, default=''): - # ignore pep8 W601 errors for this line - # setting this to use 'in' does not work in the rhn library - if self.has_key(key): - return self[key] - else: - return default - - self.config.get_option = types.MethodType(get_option_default, self.config, up2date_client.config.Config) - - return self.config - - @property - def hostname(self): - ''' - Return the non-xmlrpc RHN hostname. This is a convenience method - used for displaying a more readable RHN hostname. - - Returns: str - ''' - url = urlparse.urlparse(self.config['serverURL']) - return url[1].replace('xmlrpc.','') - - @property - def systemid(self): - systemid = None - xpath_str = "//member[name='system_id']/value/string" - - if os.path.isfile(self.config['systemIdPath']): - fd = open(self.config['systemIdPath'], 'r') - xml_data = fd.read() - fd.close() - - # Ugh, xml parsing time ... - # First, try parsing with libxml2 ... - if systemid is None: - try: - import libxml2 - doc = libxml2.parseDoc(xml_data) - ctxt = doc.xpathNewContext() - systemid = ctxt.xpathEval(xpath_str)[0].content - doc.freeDoc() - ctxt.xpathFreeContext() - except ImportError: - pass - - # m-kay, let's try with lxml now ... - if systemid is None: - try: - from lxml import etree - root = etree.fromstring(xml_data) - systemid = root.xpath(xpath_str)[0].text - except ImportError: - pass - - # Strip the 'ID-' prefix - if systemid is not None and systemid.startswith('ID-'): - systemid = systemid[3:] - - return int(systemid) - - @property - def is_registered(self): - ''' - Determine whether the current system is registered. - - Returns: True|False - ''' - return os.path.isfile(self.config['systemIdPath']) - - def configure(self, server_url): - ''' - Configure system for registration - ''' - - self.config.set('serverURL', server_url) - self.config.save() - - def enable(self): - ''' - Prepare the system for RHN registration. This includes ... - * enabling the rhnplugin yum plugin - * disabling the subscription-manager yum plugin - ''' - RegistrationBase.enable(self) - self.update_plugin_conf('rhnplugin', True) - self.update_plugin_conf('subscription-manager', False) - - def register(self, enable_eus=False, activationkey=None): - ''' - Register system to RHN. If enable_eus=True, extended update - support will be requested. 
-        '''
-        register_cmd = "/usr/sbin/rhnreg_ks --username='%s' --password='%s' --force" % (self.username, self.password)
-        if self.module.params.get('server_url', None):
-            register_cmd += " --serverUrl=%s" % self.module.params.get('server_url')
-        if enable_eus:
-            register_cmd += " --use-eus-channel"
-        if activationkey is not None:
-            register_cmd += " --activationkey '%s'" % activationkey
-        # FIXME - support --profilename
-        # FIXME - support --systemorgid
-        rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True)
-
-    def api(self, method, *args):
-        '''
-        Convenience RPC wrapper
-        '''
-        if not hasattr(self, 'server') or self.server is None:
-            if self.hostname != 'rhn.redhat.com':
-                url = "https://%s/rpc/api" % self.hostname
-            else:
-                url = "https://xmlrpc.%s/rpc/api" % self.hostname
-            self.server = xmlrpclib.Server(url, verbose=0)
-            self.session = self.server.auth.login(self.username, self.password)
-
-        func = getattr(self.server, method)
-        return func(self.session, *args)
-
-    def unregister(self):
-        '''
-        Unregister a previously registered system
-        '''
-
-        # Initiate RPC connection
-        self.api('system.deleteSystems', [self.systemid])
-
-        # Remove systemid file
-        os.unlink(self.config['systemIdPath'])
-
-    def subscribe(self, channels=[]):
-        if len(channels) <= 0:
-            return
-        current_channels = self.api('channel.software.listSystemChannels', self.systemid)
-        new_channels = [item['channel_label'] for item in current_channels]
-        new_channels.extend(channels)
-        return self.api('channel.software.setSystemChannels', self.systemid, new_channels)
-
-    def _subscribe(self, channels=[]):
-        '''
-        Subscribe to requested yum repositories using the 'rhn-channel' command
-        '''
-        rhn_channel_cmd = "rhn-channel --user='%s' --password='%s'" % (self.username, self.password)
-        rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --available-channels", check_rc=True)
-
-        # Enable requested repoid's. Each requested channel is treated as a
-        # regexp and matched against the available channels; if nothing
-        # matches, nothing is enabled.
-        for wanted_channel in channels:
-            for available_channel in stdout.rstrip().split('\n'):  # .rstrip() because of \n at the end -> empty string at the end
-                if re.search(wanted_channel, available_channel):
-                    rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --add --channel=%s" % available_channel, check_rc=True)
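A condensed, standalone sketch of the channel-matching loop in _subscribe above: each requested channel is used as a regular expression and every available channel it matches is collected. The channel names below are illustrative only:

    import re

    def match_channels(wanted_patterns, available_channels):
        matched = []
        for wanted in wanted_patterns:
            for available in available_channels:
                if re.search(wanted, available):
                    matched.append(available)
        return matched

    assert match_channels(['foo-1$'], ['rhel-x86_64-server-6-foo-1']) == ['rhel-x86_64-server-6-foo-1']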
-
-def main():
-
-    # Read system RHN configuration
-    rhn = Rhn()
-
-    module = AnsibleModule(
-        argument_spec = dict(
-            state = dict(default='present', choices=['present', 'absent']),
-            username = dict(default=None, required=False),
-            password = dict(default=None, required=False),
-            server_url = dict(default=rhn.config.get_option('serverURL'), required=False),
-            activationkey = dict(default=None, required=False),
-            enable_eus = dict(default=False, type='bool'),
-            channels = dict(default=[], type='list'),
-        )
-    )
-
-    state = module.params['state']
-    rhn.username = module.params['username']
-    rhn.password = module.params['password']
-    rhn.configure(module.params['server_url'])
-    activationkey = module.params['activationkey']
-    channels = module.params['channels']
-    rhn.module = module
-
-    # Ensure system is registered
-    if state == 'present':
-
-        # Check for missing parameters ...
-        if not (activationkey or rhn.username or rhn.password):
-            module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username, rhn.password))
-        if not activationkey and not (rhn.username and rhn.password):
-            module.fail_json(msg="Missing arguments, when registering without an activationkey, both username and password must be supplied")
-
-        # Register system
-        if rhn.is_registered:
-            module.exit_json(changed=False, msg="System already registered.")
-        else:
-            try:
-                rhn.enable()
-                rhn.register(module.params['enable_eus'] == True, activationkey)
-                rhn.subscribe(channels)
-            except Exception, e:
-                module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, e))
-
-            module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)
-
-    # Ensure system is *not* registered
-    if state == 'absent':
-        if not rhn.is_registered:
-            module.exit_json(changed=False, msg="System already unregistered.")
-        else:
-            try:
-                rhn.unregister()
-            except Exception, e:
-                module.fail_json(msg="Failed to unregister: %s" % e)
-
-            module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
-
-
-main()
diff --git a/library/packaging/rpm_key b/library/packaging/rpm_key
deleted file mode 100644
index f132d552506..00000000000
--- a/library/packaging/rpm_key
+++ /dev/null
@@ -1,206 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Ansible module to import third party repo keys to your rpm db
-# (c) 2013, Héctor Acosta
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-module: rpm_key
-author: Hector Acosta
-short_description: Adds or removes a gpg key from the rpm db
-description:
-    - Adds or removes (rpm --import) a gpg key to your rpm database.
-version_added: "1.3"
-options:
-    key:
-        required: true
-        default: null
-        aliases: []
-        description:
-            - Key that will be modified. Can be a url, a file, or a keyid if the key already exists in the database.
-    state:
-        required: false
-        default: "present"
-        choices: [present, absent]
-        description:
-            - Whether the key will be imported or removed from the rpm db.
-    validate_certs:
-        description:
-            - If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. This should only be used
-              on personally controlled sites using self-signed certificates.
-        required: false
-        default: 'yes'
-        choices: ['yes', 'no']
-
-'''
-
-EXAMPLES = '''
-# Example action to import a key from a url
-- rpm_key: state=present key=http://apt.sw.be/RPM-GPG-KEY.dag.txt
-
-# Example action to import a key from a file
-- rpm_key: state=present key=/path/to/key.gpg
-
-# Example action to ensure a key is not present in the db
-- rpm_key: state=absent key=DEADB33F
-'''
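As the key option above notes, the key may be given as a URL, a local file, or a bare keyid, and RpmKey.__init__ below dispatches on exactly that distinction, in that order. A condensed, illustrative sketch of the three-way classification (the function name is invented for illustration):

    import os
    import re

    def classify_key(key):
        if '://' in key:
            return 'url'      # fetched, then reduced to a keyid via gpg
        if re.match('(0x)?[0-9a-f]{8}', key, flags=re.IGNORECASE):
            return 'keyid'    # directly comparable against the rpm db
        if os.path.isfile(key):
            return 'file'     # read locally, then reduced to a keyid
        return 'invalid'

    assert classify_key('http://apt.sw.be/RPM-GPG-KEY.dag.txt') == 'url'
    assert classify_key('DEADB33F') == 'keyid'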
-import syslog
-import os.path
-import re
-import tempfile
-
-def is_pubkey(string):
-    """Verifies if string is a pubkey"""
-    pgp_regex = ".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*"
-    return re.match(pgp_regex, string, re.DOTALL)
-
-class RpmKey:
-
-    def __init__(self, module):
-        self.syslogging = False
-        # If the key is a url, we need to check if it's present to be idempotent,
-        # to do that, we need to check the keyid, which we can get from the armor.
-        keyfile = None
-        should_cleanup_keyfile = False
-        self.module = module
-        self.rpm = self.module.get_bin_path('rpm', True)
-        state = module.params['state']
-        key = module.params['key']
-
-        if '://' in key:
-            keyfile = self.fetch_key(key)
-            keyid = self.getkeyid(keyfile)
-            should_cleanup_keyfile = True
-        elif self.is_keyid(key):
-            keyid = key
-        elif os.path.isfile(key):
-            keyfile = key
-            keyid = self.getkeyid(keyfile)
-        else:
-            self.module.fail_json(msg="Not a valid key %s" % key)
-        keyid = self.normalize_keyid(keyid)
-
-        if state == 'present':
-            if self.is_key_imported(keyid):
-                module.exit_json(changed=False)
-            else:
-                if not keyfile:
-                    self.module.fail_json(msg="When importing a key, a valid file must be given")
-                self.import_key(keyfile, dryrun=module.check_mode)
-                if should_cleanup_keyfile:
-                    self.module.cleanup(keyfile)
-                module.exit_json(changed=True)
-        else:
-            if self.is_key_imported(keyid):
-                self.drop_key(keyid, dryrun=module.check_mode)
-                module.exit_json(changed=True)
-            else:
-                module.exit_json(changed=False)
-
-
-    def fetch_key(self, url):
-        """Downloads a key from url, returns a valid path to a gpg key"""
-        try:
-            rsp, info = fetch_url(self.module, url)
-            key = rsp.read()
-            if not is_pubkey(key):
-                self.module.fail_json(msg="Not a public key: %s" % url)
-            tmpfd, tmpname = tempfile.mkstemp()
-            tmpfile = os.fdopen(tmpfd, "w+b")
-            tmpfile.write(key)
-            tmpfile.close()
-            return tmpname
-        except urllib2.URLError, e:
-            self.module.fail_json(msg=str(e))
-
-    def normalize_keyid(self, keyid):
-        """Ensure a keyid has no leading 0x and no leading or trailing whitespace, and make sure it is lowercase"""
-        ret = keyid.strip().lower()
-        if ret.startswith('0x'):
-            return ret[2:]
-        elif ret.startswith('0X'):
-            return ret[2:]
-        else:
-            return ret
-
-    def getkeyid(self, keyfile):
-        gpg = self.module.get_bin_path('gpg', True)
-        stdout, stderr = self.execute_command([gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', '--list-packets', keyfile])
-        for line in stdout.splitlines():
-            line = line.strip()
-            if line.startswith(':signature packet:'):
-                # We want just the last 8 characters of the keyid
-                keyid = line.split()[-1].strip()[8:]
-                return keyid
-        self.module.fail_json(msg="Unexpected gpg output")
-
-    def is_keyid(self, keystr):
-        """Verifies if a key, as provided by the user, is a keyid"""
-        return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)
-
-    def execute_command(self, cmd):
-        if self.syslogging:
-            syslog.openlog('ansible-%s' % os.path.basename(__file__))
-            syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
-        rc, stdout, stderr = self.module.run_command(cmd)
-        if rc != 0:
self.module.fail_json(msg=stderr) - return stdout, stderr - - def is_key_imported(self, keyid): - stdout, stderr = self.execute_command([self.rpm, '-qa', 'gpg-pubkey']) - for line in stdout.splitlines(): - line = line.strip() - if not line: - continue - match = re.match('gpg-pubkey-([0-9a-f]+)-([0-9a-f]+)', line) - if not match: - self.module.fail_json(msg="rpm returned unexpected output [%s]" % line) - else: - if keyid == match.group(1): - return True - return False - - def import_key(self, keyfile, dryrun=False): - if not dryrun: - self.execute_command([self.rpm, '--import', keyfile]) - - def drop_key(self, key, dryrun=False): - if not dryrun: - self.execute_command([self.rpm, '--erase', '--allmatches', "gpg-pubkey-%s" % key]) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - state=dict(default='present', choices=['present', 'absent'], type='str'), - key=dict(required=True, type='str'), - validate_certs=dict(default='yes', type='bool'), - ), - supports_check_mode=True - ) - - RpmKey(module) - - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() diff --git a/library/packaging/svr4pkg b/library/packaging/svr4pkg deleted file mode 100644 index e95d4d8643f..00000000000 --- a/library/packaging/svr4pkg +++ /dev/null @@ -1,234 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Boyd Adamson -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: svr4pkg -short_description: Manage Solaris SVR4 packages -description: - - Manages SVR4 packages on Solaris 10 and 11. - - These were the native packages on Solaris <= 10 and are available - as a legacy feature in Solaris 11. - - Note that this is a very basic packaging system. It will not enforce - dependencies on install or remove. -version_added: "0.9" -author: Boyd Adamson -options: - name: - description: - - Package name, e.g. C(SUNWcsr) - required: true - - state: - description: - - Whether to install (C(present)), or remove (C(absent)) a package. - - If the package is to be installed, then I(src) is required. - - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package. - required: true - choices: ["present", "absent"] - - src: - description: - - Specifies the location to install the package from. Required when C(state=present). - - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)." - - If using a file or directory, they must already be accessible by the host. See the M(copy) module for a way to get them there. - proxy: - description: - - HTTP[s] proxy to be used if C(src) is a URL. - response_file: - description: - - Specifies the location of a response file to be used if package expects input on install. 
(added in Ansible 1.4) - required: false - zone: - description: - - Whether to install the package only in the current zone, or install it into all zones. - - The installation into all zones works only if you are working with the global zone. - required: false - default: "all" - choices: ["current", "all"] - version_added: "1.6" - category: - description: - - Install/Remove category instead of a single package. - required: false - choices: ["true", "false"] - version_added: "1.6" -''' - -EXAMPLES = ''' -# Install a package from an already copied file -- svr4pkg: name=CSWcommon src=/tmp/cswpkgs.pkg state=present - -# Install a package directly from an http site -- svr4pkg: name=CSWpkgutil src=http://get.opencsw.org/now state=present zone=current - -# Install a package with a response file -- svr4pkg: name=CSWggrep src=/tmp/third-party.pkg response_file=/tmp/ggrep.response state=present - -# Ensure that a package is not installed. -- svr4pkg: name=SUNWgnome-sound-recorder state=absent - -# Ensure that a category is not installed. -- svr4pkg: name=FIREFOX state=absent category=true -''' - - -import os -import tempfile - -def package_installed(module, name, category): - cmd = [module.get_bin_path('pkginfo', True)] - cmd.append('-q') - if category: - cmd.append('-c') - cmd.append(name) - rc, out, err = module.run_command(' '.join(cmd)) - if rc == 0: - return True - else: - return False - -def create_admin_file(): - (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True) - fullauto = ''' -mail= -instance=unique -partial=nocheck -runlevel=quit -idepend=nocheck -rdepend=nocheck -space=quit -setuid=nocheck -conflict=nocheck -action=nocheck -networktimeout=60 -networkretries=3 -authentication=quit -keystore=/var/sadm/security -proxy= -basedir=default -''' - os.write(desc, fullauto) - os.close(desc) - return filename - -def run_command(module, cmd): - progname = cmd[0] - cmd[0] = module.get_bin_path(progname, True) - return module.run_command(cmd) - -def package_install(module, name, src, proxy, response_file, zone, category): - adminfile = create_admin_file() - cmd = [ 'pkgadd', '-n'] - if zone == 'current': - cmd += [ '-G' ] - cmd += [ '-a', adminfile, '-d', src ] - if proxy is not None: - cmd += [ '-x', proxy ] - if response_file is not None: - cmd += [ '-r', response_file ] - if category: - cmd += [ '-Y' ] - cmd.append(name) - (rc, out, err) = run_command(module, cmd) - os.unlink(adminfile) - return (rc, out, err) - -def package_uninstall(module, name, src, category): - adminfile = create_admin_file() - if category: - cmd = [ 'pkgrm', '-na', adminfile, '-Y', name ] - else: - cmd = [ 'pkgrm', '-na', adminfile, name] - (rc, out, err) = run_command(module, cmd) - os.unlink(adminfile) - return (rc, out, err) - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required = True), - state = dict(required = True, choices=['present', 'absent']), - src = dict(default = None), - proxy = dict(default = None), - response_file = dict(default = None), - zone = dict(required=False, default = 'all', choices=['current','all']), - category = dict(default=False, type='bool') - ), - supports_check_mode=True - ) - state = module.params['state'] - name = module.params['name'] - src = module.params['src'] - proxy = module.params['proxy'] - response_file = module.params['response_file'] - zone = module.params['zone'] - category = module.params['category'] - rc = None - out = '' - err = '' - result = {} - result['name'] = name - result['state'] = state - - if state == 'present': 
-        if src is None:
-            module.fail_json(name=name,
-                             msg="src is required when state=present")
-        if not package_installed(module, name, category):
-            if module.check_mode:
-                module.exit_json(changed=True)
-            (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
-            # Stdout is normally empty but for some packages can be
-            # very long and is not often useful
-            if len(out) > 75:
-                out = out[:75] + '...'
-
-    elif state == 'absent':
-        if package_installed(module, name, category):
-            if module.check_mode:
-                module.exit_json(changed=True)
-            (rc, out, err) = package_uninstall(module, name, src, category)
-            out = out[:75]
-
-    # Success, Warning, Interruption, Reboot all, Reboot this return codes
-    if rc in (0, 2, 3, 10, 20):
-        result['changed'] = True
-    # no install nor uninstall, or failed
-    else:
-        result['changed'] = False
-
-    # Fatal error, Administration, Administration Interaction return codes
-    if rc in (1, 4, 5):
-        result['failed'] = True
-    else:
-        result['failed'] = False
-
-    if out:
-        result['stdout'] = out
-    if err:
-        result['stderr'] = err
-
-    module.exit_json(**result)
-
-# import module snippets
-from ansible.module_utils.basic import *
-main()
diff --git a/library/packaging/swdepot b/library/packaging/swdepot
deleted file mode 100644
index b41a860531f..00000000000
--- a/library/packaging/swdepot
+++ /dev/null
@@ -1,196 +0,0 @@
-#!/usr/bin/python -tt
-# -*- coding: utf-8 -*-
-
-# (c) 2013, Raul Melo
-# Written by Raul Melo
-# Based on yum module written by Seth Vidal
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-import pipes
-
-DOCUMENTATION = '''
----
-module: swdepot
-short_description: Manage packages with swdepot package manager (HP-UX)
-description:
-    - Will install, upgrade and remove packages with the swdepot package manager (HP-UX)
-version_added: "1.4"
-notes: []
-author: Raul Melo
-options:
-    name:
-        description:
-            - package name.
-        required: true
-        default: null
-        choices: []
-        aliases: []
-        version_added: 1.4
-    state:
-        description:
-            - whether to install (C(present), C(latest)), or remove (C(absent)) a package.
-        required: true
-        default: null
-        choices: [ 'present', 'latest', 'absent']
-        aliases: []
-        version_added: 1.4
-    depot:
-        description:
-            - The source repository from which to install or upgrade a package.
-        required: false
-        default: null
-        choices: []
-        aliases: []
-        version_added: 1.4
-'''
-
-EXAMPLES = '''
-- swdepot: name=unzip-6.0 state=present depot=repository:/path
-- swdepot: name=unzip state=latest depot=repository:/path
-- swdepot: name=unzip state=absent
-'''
-
-def compare_package(version1, version2):
-    """ Compare package versions.
-        Return values:
-            -1 first is lower
-             0 equal
-             1 first is greater """
-
-    def normalize(v):
-        return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
-    return cmp(normalize(version1), normalize(version2))
-
-def query_package(module, name, depot=None):
-    """ Returns whether a package is installed or not, and its version. """
-
-    cmd_list = '/usr/sbin/swlist -a revision -l product'
-    if depot:
-        rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, pipes.quote(depot), pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True)
-    else:
-        rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True)
-    if rc == 0:
-        version = re.sub("\s\s+|\t", " ", stdout).strip().split()[1]
-    else:
-        version = None
-
-    return rc, version
-
-def remove_package(module, name):
-    """ Uninstall package if installed. """
-
-    cmd_remove = '/usr/sbin/swremove'
-    rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name))
-
-    if rc == 0:
-        return rc, stdout
-    else:
-        return rc, stderr
-
-def install_package(module, depot, name):
-    """ Install package if not already installed """
-
-    cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false'
-    rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name))
-    if rc == 0:
-        return rc, stdout
-    else:
-        return rc, stderr
-
-def main():
-    module = AnsibleModule(
-        argument_spec = dict(
-            name = dict(aliases=['pkg'], required=True),
-            state = dict(choices=['present', 'absent', 'latest'], required=True),
-            depot = dict(default=None, required=False)
-        ),
-        supports_check_mode=True
-    )
-    name = module.params['name']
-    state = module.params['state']
-    depot = module.params['depot']
-
-    changed = False
-    msg = "No change"
-    rc = 0
-    if ( state == 'present' or state == 'latest' ) and depot == None:
-        output = "depot parameter is mandatory when state is present or latest"
-        module.fail_json(name=name, msg=output, rc=rc)
-
-
-    # Check local version
-    rc, version_installed = query_package(module, name)
-    if not rc:
-        installed = True
-        msg = "Already installed"
-
-    else:
-        installed = False
-
-    if ( state == 'present' or state == 'latest' ) and installed == False:
-        if module.check_mode:
-            module.exit_json(changed=True)
-        rc, output = install_package(module, depot, name)
-
-        if not rc:
-            changed = True
-            msg = "Package installed"
-
-        else:
-            module.fail_json(name=name, msg=output, rc=rc)
-
-    elif state == 'latest' and installed == True:
-        # Check depot version
-        rc, version_depot = query_package(module, name, depot)
-
-        if not rc:
-            if compare_package(version_installed, version_depot) == -1:
-                if module.check_mode:
-                    module.exit_json(changed=True)
-                # Install new version
-                rc, output = install_package(module, depot, name)
-
-                if not rc:
-                    msg = "Package upgraded, before " + version_installed + ", now " + version_depot
-                    changed = True
-
-                else:
-                    module.fail_json(name=name, msg=output, rc=rc)
-
-        else:
-            output = "Software package not in repository " + depot
-            module.fail_json(name=name, msg=output, rc=rc)
-
-    elif state == 'absent' and installed == True:
-        if module.check_mode:
-            module.exit_json(changed=True)
-        rc, output = remove_package(module, name)
-        if not rc:
-            changed = True
-            msg = "Package removed"
-        else:
-            module.fail_json(name=name, msg=output, rc=rc)
-
-    if module.check_mode:
-        module.exit_json(changed=False)
-
-    module.exit_json(changed=changed, name=name, state=state, msg=msg)
-
-# import module snippets
-from ansible.module_utils.basic import *
-
-main()
-
diff --git a/library/packaging/urpmi b/library/packaging/urpmi
deleted file mode 100644
index a42ee7b87fc..00000000000
--- a/library/packaging/urpmi
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/usr/bin/python -tt
-# -*- coding: utf-8 -*-
-
-# (c) 2013, Philippe Makowski
-# Written by Philippe Makowski
-# Based on apt module written by Matthew Williams
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-
-
-DOCUMENTATION = '''
----
-module: urpmi
-short_description: Urpmi manager
-description:
-    - Manages packages with I(urpmi) (such as for Mageia or Mandriva)
-version_added: "1.3.4"
-options:
-    pkg:
-        description:
-            - name of package to install, upgrade or remove.
-        required: true
-        default: null
-    state:
-        description:
-            - Indicates the desired package state
-        required: false
-        default: present
-        choices: [ "absent", "present" ]
-    update_cache:
-        description:
-            - update the package database first C(urpmi.update -a).
-        required: false
-        default: no
-        choices: [ "yes", "no" ]
-    no-suggests:
-        description:
-            - Corresponds to the C(--no-suggests) option for I(urpmi).
-        required: false
-        default: yes
-        choices: [ "yes", "no" ]
-    force:
-        description:
-            - Corresponds to the C(--force) option for I(urpmi).
-        required: false
-        default: yes
-        choices: [ "yes", "no" ]
-author: Philippe Makowski
-notes: []
-'''
-
-EXAMPLES = '''
-# install package foo
-- urpmi: pkg=foo state=present
-# remove package foo
-- urpmi: pkg=foo state=absent
-# remove packages foo and bar
-- urpmi: pkg=foo,bar state=absent
-# update the package database (urpmi.update -a -q) and install bar
-# (bar will be upgraded if a newer version exists)
-- urpmi: pkg=bar state=present update_cache=yes
-'''
-
-
-import json
-import shlex
-import os
-import sys
-
-URPMI_PATH = '/usr/sbin/urpmi'
-URPME_PATH = '/usr/sbin/urpme'
-
-def query_package(module, name):
-    # rpm -q returns 0 if the package is installed,
-    # 1 if it is not installed
-    cmd = "rpm -q %s" % (name)
-    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
-    if rc == 0:
-        return True
-    else:
-        return False
-
-def query_package_provides(module, name):
-    # rpm -q returns 0 if the package is installed,
-    # 1 if it is not installed
-    cmd = "rpm -q --provides %s" % (name)
-    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
-    return rc == 0
-
-
-def update_package_db(module):
-    cmd = "urpmi.update -a -q"
-    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
-    if rc != 0:
-        module.fail_json(msg="could not update package db")
-
-
-def remove_packages(module, packages):
-
-    remove_c = 0
-    # Using a for loop in case of error, so we can report the package that failed
-    for package in packages:
-        # Query the package first, to see if we even need to remove
-        if not query_package(module, package):
-            continue
-
-        cmd = "%s --auto %s" % (URPME_PATH, package)
-        rc, stdout, stderr = module.run_command(cmd, check_rc=False)
-
-        if rc != 0:
-            module.fail_json(msg="failed to remove %s" % (package))
-
-        remove_c += 1
-
-    if remove_c > 0:
-
-        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
-
-    module.exit_json(changed=False, msg="package(s) already absent")
-
-
-def install_packages(module, pkgspec, force=True, no_suggests=True):
-
-    packages = ""
-    for package in pkgspec:
if not query_package_provides(module, package): - packages += "'%s' " % package - - if len(packages) != 0: - if no_suggests: - no_suggests_yes = '--no-suggests' - else: - no_suggests_yes = '' - - if force: - force_yes = '--force' - else: - force_yes = '' - - cmd = ("%s --auto %s --quiet %s %s" % (URPMI_PATH, force_yes, no_suggests_yes, packages)) - - rc, out, err = module.run_command(cmd) - - installed = True - for packages in pkgspec: - if not query_package_provides(module, package): - installed = False - - # urpmi always have 0 for exit code if --force is used - if rc or not installed: - module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err)) - else: - module.exit_json(changed=True, msg="%s present(s)" % packages) - else: - module.exit_json(changed=False) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']), - update_cache = dict(default=False, aliases=['update-cache'], type='bool'), - force = dict(default=True, type='bool'), - no_suggests = dict(default=True, aliases=['no-suggests'], type='bool'), - package = dict(aliases=['pkg', 'name'], required=True))) - - - if not os.path.exists(URPMI_PATH): - module.fail_json(msg="cannot find urpmi, looking for %s" % (URPMI_PATH)) - - p = module.params - - force_yes = p['force'] - no_suggest_yes = p['no_suggests'] - - if p['update_cache']: - update_package_db(module) - - packages = p['package'].split(',') - - if p['state'] in [ 'installed', 'present' ]: - install_packages(module, packages, force_yes, no_suggest_yes) - - elif p['state'] in [ 'removed', 'absent' ]: - remove_packages(module, packages) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/yum b/library/packaging/yum deleted file mode 100644 index 245d3b8020a..00000000000 --- a/library/packaging/yum +++ /dev/null @@ -1,838 +0,0 @@ -#!/usr/bin/python -tt -# -*- coding: utf-8 -*- - -# (c) 2012, Red Hat, Inc -# Written by Seth Vidal -# (c) 2014, Epic Games, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - - -import traceback -import os -import yum - -try: - from yum.misc import find_unfinished_transactions, find_ts_remaining - from rpmUtils.miscutils import splitFilename - transaction_helpers = True -except: - transaction_helpers = False - -DOCUMENTATION = ''' ---- -module: yum -version_added: historical -short_description: Manages packages with the I(yum) package manager -description: - - Installs, upgrade, removes, and lists packages and groups with the I(yum) package manager. -options: - name: - description: - - "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: yum -y update. You can also pass a url or a local path to a rpm file." 
- required: true - default: null - aliases: [] - list: - description: - - Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples. - required: false - default: null - state: - description: - - Whether to install (C(present), C(latest)), or remove (C(absent)) a package. - required: false - choices: [ "present", "latest", "absent" ] - default: "present" - enablerepo: - description: - - I(Repoid) of repositories to enable for the install/update operation. - These repos will not persist beyond the transaction. - When specifying multiple repos, separate them with a ",". - required: false - version_added: "0.9" - default: null - aliases: [] - - disablerepo: - description: - - I(Repoid) of repositories to disable for the install/update operation. - These repos will not persist beyond the transaction. - When specifying multiple repos, separate them with a ",". - required: false - version_added: "0.9" - default: null - aliases: [] - - conf_file: - description: - - The remote yum configuration file to use for the transaction. - required: false - version_added: "0.6" - default: null - aliases: [] - - disable_gpg_check: - description: - - Whether to disable the GPG checking of signatures of packages being - installed. Has an effect only if state is I(present) or I(latest). - required: false - version_added: "1.2" - default: "no" - choices: ["yes", "no"] - aliases: [] - -notes: [] -# informational: requirements for nodes -requirements: [ yum, rpm ] -author: Seth Vidal -''' - -EXAMPLES = ''' -- name: install the latest version of Apache - yum: name=httpd state=latest - -- name: remove the Apache package - yum: name=httpd state=absent - -- name: install the latest version of Apache from the testing repo - yum: name=httpd enablerepo=testing state=present - -- name: upgrade all packages - yum: name=* state=latest - -- name: install the nginx rpm from a remote repo - yum: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present - -- name: install nginx rpm from a local file - yum: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present - -- name: install the 'Development tools' package group - yum: name="@Development tools" state=present -''' - -def_qf = "%{name}-%{version}-%{release}.%{arch}" - -repoquery='/usr/bin/repoquery' -if not os.path.exists(repoquery): - repoquery = None - -yumbin='/usr/bin/yum' - -import syslog - -def log(msg): - syslog.openlog('ansible-yum', 0, syslog.LOG_USER) - syslog.syslog(syslog.LOG_NOTICE, msg) - -def yum_base(conf_file=None, cachedir=False): - - my = yum.YumBase() - my.preconf.debuglevel=0 - my.preconf.errorlevel=0 - if conf_file and os.path.exists(conf_file): - my.preconf.fn = conf_file - if cachedir or os.geteuid() != 0: - if hasattr(my, 'setCacheDir'): - my.setCacheDir() - else: - cachedir = yum.misc.getCacheDir() - my.repos.setCacheDir(cachedir) - my.conf.cache = 0 - - return my - -def install_yum_utils(module): - - if not module.check_mode: - yum_path = module.get_bin_path('yum') - if yum_path: - rc, so, se = module.run_command('%s -y install yum-utils' % yum_path) - if rc == 0: - this_path = module.get_bin_path('repoquery') - global repoquery - repoquery = this_path - -def po_to_nevra(po): - - if hasattr(po, 'ui_nevra'): - return po.ui_nevra - else: - return '%s-%s-%s.%s' % (po.name, po.version, po.release, po.arch) - -def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[], is_pkg=False): - - if not repoq: - - 
pkgs = [] - try: - my = yum_base(conf_file) - for rid in en_repos: - my.repos.enableRepo(rid) - for rid in dis_repos: - my.repos.disableRepo(rid) - - e,m,u = my.rpmdb.matchPackageNames([pkgspec]) - pkgs = e + m - if not pkgs: - pkgs.extend(my.returnInstalledPackagesByDep(pkgspec)) - except Exception, e: - module.fail_json(msg="Failure talking to yum: %s" % e) - - return [ po_to_nevra(p) for p in pkgs ] - - else: - - cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, pkgspec] - rc,out,err = module.run_command(cmd) - if not is_pkg: - cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, "--whatprovides", pkgspec] - rc2,out2,err2 = module.run_command(cmd) - else: - rc2,out2,err2 = (0, '', '') - - if rc == 0 and rc2 == 0: - out += out2 - return [ p for p in out.split('\n') if p.strip() ] - else: - module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2)) - - return [] - -def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): - - if not repoq: - - pkgs = [] - try: - my = yum_base(conf_file) - for rid in en_repos: - my.repos.enableRepo(rid) - for rid in dis_repos: - my.repos.disableRepo(rid) - - e,m,u = my.pkgSack.matchPackageNames([pkgspec]) - pkgs = e + m - if not pkgs: - pkgs.extend(my.returnPackagesByDep(pkgspec)) - except Exception, e: - module.fail_json(msg="Failure talking to yum: %s" % e) - - return [ po_to_nevra(p) for p in pkgs ] - - else: - myrepoq = list(repoq) - - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) - - for repoid in en_repos: - r_cmd = ['--enablerepo', repoid] - myrepoq.extend(r_cmd) - - cmd = myrepoq + ["--qf", qf, pkgspec] - rc,out,err = module.run_command(cmd) - if rc == 0: - return [ p for p in out.split('\n') if p.strip() ] - else: - module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err)) - - - return [] - -def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): - - if not repoq: - - retpkgs = [] - pkgs = [] - updates = [] - - try: - my = yum_base(conf_file) - for rid in en_repos: - my.repos.enableRepo(rid) - for rid in dis_repos: - my.repos.disableRepo(rid) - - pkgs = my.returnPackagesByDep(pkgspec) + my.returnInstalledPackagesByDep(pkgspec) - if not pkgs: - e,m,u = my.pkgSack.matchPackageNames([pkgspec]) - pkgs = e + m - updates = my.doPackageLists(pkgnarrow='updates').updates - except Exception, e: - module.fail_json(msg="Failure talking to yum: %s" % e) - - for pkg in pkgs: - if pkg in updates: - retpkgs.append(pkg) - - return set([ po_to_nevra(p) for p in retpkgs ]) - - else: - myrepoq = list(repoq) - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) - - for repoid in en_repos: - r_cmd = ['--enablerepo', repoid] - myrepoq.extend(r_cmd) - - cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec] - rc,out,err = module.run_command(cmd) - - if rc == 0: - return set([ p for p in out.split('\n') if p.strip() ]) - else: - module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err)) - - return [] - -def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): - - if not repoq: - - pkgs = [] - try: - my = yum_base(conf_file) - for rid in en_repos: - my.repos.enableRepo(rid) - for rid in dis_repos: - my.repos.disableRepo(rid) - - pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec) - if not pkgs: - e,m,u = my.pkgSack.matchPackageNames([req_spec]) - pkgs.extend(e) - pkgs.extend(m) - e,m,u = 
my.rpmdb.matchPackageNames([req_spec]) - pkgs.extend(e) - pkgs.extend(m) - except Exception, e: - module.fail_json(msg="Failure talking to yum: %s" % e) - - return set([ po_to_nevra(p) for p in pkgs ]) - - else: - myrepoq = list(repoq) - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) - - for repoid in en_repos: - r_cmd = ['--enablerepo', repoid] - myrepoq.extend(r_cmd) - - cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec] - rc,out,err = module.run_command(cmd) - cmd = myrepoq + ["--qf", qf, req_spec] - rc2,out2,err2 = module.run_command(cmd) - if rc == 0 and rc2 == 0: - out += out2 - pkgs = set([ p for p in out.split('\n') if p.strip() ]) - if not pkgs: - pkgs = is_installed(module, repoq, req_spec, conf_file, qf=qf) - return pkgs - else: - module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2)) - - return [] - -def transaction_exists(pkglist): - """ - checks the package list to see if any packages are - involved in an incomplete transaction - """ - - conflicts = [] - if not transaction_helpers: - return conflicts - - # first, we create a list of the package 'nvreas' - # so we can compare the pieces later more easily - pkglist_nvreas = [] - for pkg in pkglist: - pkglist_nvreas.append(splitFilename(pkg)) - - # next, we build the list of packages that are - # contained within an unfinished transaction - unfinished_transactions = find_unfinished_transactions() - for trans in unfinished_transactions: - steps = find_ts_remaining(trans) - for step in steps: - # the action is install/erase/etc., but we only - # care about the package spec contained in the step - (action, step_spec) = step - (n,v,r,e,a) = splitFilename(step_spec) - # and see if that spec is in the list of packages - # requested for installation/updating - for pkg in pkglist_nvreas: - # if the name and arch match, we're going to assume - # this package is part of a pending transaction - # the label is just for display purposes - label = "%s-%s" % (n,a) - if n == pkg[0] and a == pkg[4]: - if label not in conflicts: - conflicts.append("%s-%s" % (n,a)) - break - return conflicts - -def local_nvra(module, path): - """return nvra of a local rpm passed in""" - - cmd = ['/bin/rpm', '-qp' ,'--qf', - '%{name}-%{version}-%{release}.%{arch}\n', path ] - rc, out, err = module.run_command(cmd) - if rc != 0: - return None - nvra = out.split('\n')[0] - return nvra - -def pkg_to_dict(pkgstr): - - if pkgstr.strip(): - n,e,v,r,a,repo = pkgstr.split('|') - else: - return {'error_parsing': pkgstr} - - d = { - 'name':n, - 'arch':a, - 'epoch':e, - 'release':r, - 'version':v, - 'repo':repo, - 'nevra': '%s:%s-%s-%s.%s' % (e,n,v,r,a) - } - - if repo == 'installed': - d['yumstate'] = 'installed' - else: - d['yumstate'] = 'available' - - return d - -def repolist(module, repoq, qf="%{repoid}"): - - cmd = repoq + ["--qf", qf, "-a"] - rc,out,err = module.run_command(cmd) - ret = [] - if rc == 0: - ret = set([ p for p in out.split('\n') if p.strip() ]) - return ret - -def list_stuff(module, conf_file, stuff): - - qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}" - repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q'] - if conf_file and os.path.exists(conf_file): - repoq += ['-c', conf_file] - - if stuff == 'installed': - return [ pkg_to_dict(p) for p in is_installed(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] - elif stuff == 'updates': - return [ pkg_to_dict(p) for p in is_update(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] - elif stuff == 'available': 
- return [ pkg_to_dict(p) for p in is_available(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] - elif stuff == 'repos': - return [ dict(repoid=name, state='enabled') for name in repolist(module, repoq) if name.strip() ] - else: - return [ pkg_to_dict(p) for p in is_installed(module, repoq, stuff, conf_file, qf=qf) + is_available(module, repoq, stuff, conf_file, qf=qf) if p.strip() ] - -def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): - - res = {} - res['results'] = [] - res['msg'] = '' - res['rc'] = 0 - res['changed'] = False - - for spec in items: - pkg = None - - # check if pkgspec is installed (if possible for idempotence) - # localpkg - if spec.endswith('.rpm') and '://' not in spec: - # get the pkg name-v-r.arch - if not os.path.exists(spec): - res['msg'] += "No Package file matching '%s' found on system" % spec - module.fail_json(**res) - - nvra = local_nvra(module, spec) - # look for them in the rpmdb - if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos): - # if they are there, skip it - continue - pkg = spec - - # URL - elif '://' in spec: - pkg = spec - - #groups :( - elif spec.startswith('@'): - # complete wild ass guess b/c it's a group - pkg = spec - - # range requires or file-requires or pkgname :( - else: - # most common case is the pkg is already installed and done - # short circuit all the bs - and search for it as a pkg in is_installed - # if you find it then we're done - if not set(['*','?']).intersection(set(spec)): - pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True) - if pkgs: - res['results'].append('%s providing %s is already installed' % (pkgs[0], spec)) - continue - - # look up what pkgs provide this - pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos) - if not pkglist: - res['msg'] += "No Package matching '%s' found available, installed or updated" % spec - module.fail_json(**res) - - # if any of the packages are involved in a transaction, fail now - # so that we don't hang on the yum operation later - conflicts = transaction_exists(pkglist) - if len(conflicts) > 0: - res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts) - module.fail_json(**res) - - # if any of them are installed - # then nothing to do - - found = False - for this in pkglist: - if is_installed(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True): - found = True - res['results'].append('%s providing %s is already installed' % (this, spec)) - break - - # if the version of the pkg you have installed is not in ANY repo, but there are - # other versions in the repos (both higher and lower) then the previous checks won't work. - # so we check one more time. This really only works for pkgname - not for file provides or virt provides - # but virt provides should be all caught in what_provides on its own. 
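            # To recap the idempotence checks in this branch: first the spec is
            # matched directly against the rpmdb, then what_provides() maps it
            # to candidate packages, and the loop above marks it satisfied if
            # any candidate is installed. The check below covers the remaining
            # corner case described above, where the installed version is no
            # longer present in any configured repo.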
- # highly irritating - if not found: - if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos): - found = True - res['results'].append('package providing %s is already installed' % (spec)) - - if found: - continue - - # if not - then pass in the spec as what to install - # we could get here if nothing provides it but that's not - # the error we're catching here - pkg = spec - - cmd = yum_basecmd + ['install', pkg] - - if module.check_mode: - module.exit_json(changed=True) - - changed = True - - rc, out, err = module.run_command(cmd) - - # Fail on invalid urls: - if (rc == 1 and '://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)): - err = 'Package at %s could not be installed' % spec - module.fail_json(changed=False,msg=err,rc=1) - elif (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out: - # avoid failing in the 'Nothing To Do' case - # this may happen with an URL spec. - # for an already installed group, - # we get rc = 0 and 'Nothing to do' in out, not in err. - rc = 0 - err = '' - out = '%s: Nothing to do' % spec - changed = False - - res['rc'] += rc - res['results'].append(out) - res['msg'] += err - - # FIXME - if we did an install - go and check the rpmdb to see if it actually installed - # look for the pkg in rpmdb - # look for the pkg via obsoletes - - # accumulate any changes - res['changed'] |= changed - - module.exit_json(**res) - - -def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): - - res = {} - res['results'] = [] - res['msg'] = '' - res['changed'] = False - res['rc'] = 0 - - for pkg in items: - is_group = False - # group remove - this is doom on a stick - if pkg.startswith('@'): - is_group = True - else: - if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos): - res['results'].append('%s is not installed' % pkg) - continue - - # run an actual yum transaction - cmd = yum_basecmd + ["remove", pkg] - - if module.check_mode: - module.exit_json(changed=True) - - rc, out, err = module.run_command(cmd) - - res['rc'] += rc - res['results'].append(out) - res['msg'] += err - - # compile the results into one batch. If anything is changed - # then mark changed - # at the end - if we've end up failed then fail out of the rest - # of the process - - # at this point we should check to see if the pkg is no longer present - - if not is_group: # we can't sensibly check for a group being uninstalled reliably - # look to see if the pkg shows up from is_installed. 
If it doesn't - if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos): - res['changed'] = True - else: - module.fail_json(**res) - - if rc != 0: - module.fail_json(**res) - - module.exit_json(**res) - -def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): - - res = {} - res['results'] = [] - res['msg'] = '' - res['changed'] = False - res['rc'] = 0 - - for spec in items: - - pkg = None - basecmd = 'update' - cmd = '' - # groups, again - if spec.startswith('@'): - pkg = spec - - elif spec == '*': #update all - # use check-update to see if there is any need - rc,out,err = module.run_command(yum_basecmd + ['check-update']) - if rc == 100: - cmd = yum_basecmd + [basecmd] - else: - res['results'].append('All packages up to date') - continue - - # dep/pkgname - find it - else: - if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos): - basecmd = 'update' - else: - basecmd = 'install' - - pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos) - if not pkglist: - res['msg'] += "No Package matching '%s' found available, installed or updated" % spec - module.fail_json(**res) - - nothing_to_do = True - for this in pkglist: - if basecmd == 'install' and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): - nothing_to_do = False - break - - if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=en_repos): - nothing_to_do = False - break - - if nothing_to_do: - res['results'].append("All packages providing %s are up to date" % spec) - continue - - # if any of the packages are involved in a transaction, fail now - # so that we don't hang on the yum operation later - conflicts = transaction_exists(pkglist) - if len(conflicts) > 0: - res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts) - module.fail_json(**res) - - pkg = spec - if not cmd: - cmd = yum_basecmd + [basecmd, pkg] - - if module.check_mode: - return module.exit_json(changed=True) - - rc, out, err = module.run_command(cmd) - - res['rc'] += rc - res['results'].append(out) - res['msg'] += err - - # FIXME if it is - update it and check to see if it applied - # check to see if there is no longer an update available for the pkgspec - - if rc: - res['failed'] = True - else: - res['changed'] = True - - module.exit_json(**res) - -def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo, - disable_gpg_check): - - # take multiple args comma separated - items = pkgspec.split(',') - - # need debug level 2 to get 'Nothing to do' for groupinstall. 
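    # ('-d 2' sets yum's debug level so messages such as 'Nothing to do',
    # which the install/latest code paths parse, still show up in the output;
    # '-y' answers yes to prompts, since the module must run unattended.)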
- yum_basecmd = [yumbin, '-d', '2', '-y'] - - - if not repoquery: - repoq = None - else: - repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q'] - - if conf_file and os.path.exists(conf_file): - yum_basecmd += ['-c', conf_file] - if repoq: - repoq += ['-c', conf_file] - - dis_repos =[] - en_repos = [] - if disablerepo: - dis_repos = disablerepo.split(',') - if enablerepo: - en_repos = enablerepo.split(',') - - for repoid in dis_repos: - r_cmd = ['--disablerepo=%s' % repoid] - yum_basecmd.extend(r_cmd) - - for repoid in en_repos: - r_cmd = ['--enablerepo=%s' % repoid] - yum_basecmd.extend(r_cmd) - - if state in ['installed', 'present', 'latest']: - my = yum_base(conf_file) - try: - for r in dis_repos: - my.repos.disableRepo(r) - - current_repos = my.repos.repos.keys() - for r in en_repos: - try: - my.repos.enableRepo(r) - new_repos = my.repos.repos.keys() - for i in new_repos: - if not i in current_repos: - rid = my.repos.getRepo(i) - a = rid.repoXML.repoid - current_repos = new_repos - except yum.Errors.YumBaseError, e: - module.fail_json(msg="Error setting/accessing repo %s: %s" % (r, e)) - except yum.Errors.YumBaseError, e: - module.fail_json(msg="Error accessing repos: %s" % e) - - if state in ['installed', 'present']: - if disable_gpg_check: - yum_basecmd.append('--nogpgcheck') - install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos) - elif state in ['removed', 'absent']: - remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos) - elif state == 'latest': - if disable_gpg_check: - yum_basecmd.append('--nogpgcheck') - latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos) - - # should be caught by AnsibleModule argument_spec - return dict(changed=False, failed=True, results='', errors='unexpected state') - -def main(): - - # state=installed name=pkgspec - # state=removed name=pkgspec - # state=latest name=pkgspec - # - # informational commands: - # list=installed - # list=updates - # list=available - # list=repos - # list=pkgspec - - module = AnsibleModule( - argument_spec = dict( - name=dict(aliases=['pkg']), - # removed==absent, installed==present, these are accepted as aliases - state=dict(default='installed', choices=['absent','present','installed','removed','latest']), - enablerepo=dict(), - disablerepo=dict(), - list=dict(), - conf_file=dict(default=None), - disable_gpg_check=dict(required=False, default="no", type='bool'), - # this should not be needed, but exists as a failsafe - install_repoquery=dict(required=False, default="yes", type='bool'), - ), - required_one_of = [['name','list']], - mutually_exclusive = [['name','list']], - supports_check_mode = True - ) - - # this should not be needed, but exists as a failsafe - params = module.params - if params['install_repoquery'] and not repoquery and not module.check_mode: - install_yum_utils(module) - - if params['list']: - if not repoquery: - module.fail_json(msg="repoquery is required to use list= with this module. 
Please install the yum-utils package.") - results = dict(results=list_stuff(module, params['conf_file'], params['list'])) - module.exit_json(**results) - - else: - pkg = params['name'] - state = params['state'] - enablerepo = params.get('enablerepo', '') - disablerepo = params.get('disablerepo', '') - disable_gpg_check = params['disable_gpg_check'] - res = ensure(module, state, pkg, params['conf_file'], enablerepo, - disablerepo, disable_gpg_check) - module.fail_json(msg="we should never get here unless this all failed", **res) - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/packaging/zypper b/library/packaging/zypper deleted file mode 100644 index 91a87a92ed8..00000000000 --- a/library/packaging/zypper +++ /dev/null @@ -1,237 +0,0 @@ -#!/usr/bin/python -tt -# -*- coding: utf-8 -*- - -# (c) 2013, Patrick Callahan -# based on -# openbsd_pkg -# (c) 2013 -# Patrik Lundin -# -# yum -# (c) 2012, Red Hat, Inc -# Written by Seth Vidal -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import re - -DOCUMENTATION = ''' ---- -module: zypper -author: Patrick Callahan -version_added: "1.2" -short_description: Manage packages on SuSE and openSuSE -description: - - Manage packages on SuSE and openSuSE using the zypper and rpm tools. -options: - name: - description: - - package name or package specifier wth version C(name) or C(name-1.0). - required: true - aliases: [ 'pkg' ] - state: - description: - - C(present) will make sure the package is installed. - C(latest) will make sure the latest version of the package is installed. - C(absent) will make sure the specified package is not installed. - required: false - choices: [ present, latest, absent ] - default: "present" - disable_gpg_check: - description: - - Whether to disable to GPG signature checking of the package - signature being installed. Has an effect only if state is - I(present) or I(latest). - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - disable_recommends: - version_added: "1.8" - description: - - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does install recommended packages. - required: false - default: "yes" - choices: [ "yes", "no" ] - -notes: [] -# informational: requirements for nodes -requirements: [ zypper, rpm ] -author: Patrick Callahan -''' - -EXAMPLES = ''' -# Install "nmap" -- zypper: name=nmap state=present - -# Install apache2 with recommended packages -- zypper: name=apache2 state=present disable_recommends=no - -# Remove the "nmap" package -- zypper: name=nmap state=absent -''' - -# Function used for getting the name of a currently installed package. 
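# (It returns the installed NAME-VERSION string via rpm; package_latest()
# below compares this value before and after running 'zypper update' to
# decide whether anything actually changed.)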
-def get_current_name(m, name): - cmd = '/bin/rpm -q --qf \'%{NAME}-%{VERSION}\'' - (rc, stdout, stderr) = m.run_command("%s %s" % (cmd, name)) - - if rc != 0: - return (rc, stdout, stderr) - - syntax = "%s" - - for line in stdout.splitlines(): - if syntax % name in line: - current_name = line.split()[0] - - return current_name - -# Function used to find out if a package is currently installed. -def get_package_state(m, name): - cmd = ['/bin/rpm', '--query', '--info', name] - - rc, stdout, stderr = m.run_command(cmd, check_rc=False) - - if rc == 0: - return True - else: - return False - -# Function used to make sure a package is present. -def package_present(m, name, installed_state, disable_gpg_check, disable_recommends): - if installed_state is False: - cmd = ['/usr/bin/zypper', '--non-interactive'] - # add global options before zypper command - if disable_gpg_check: - cmd.append('--no-gpg-check') - - cmd.extend(['install', '--auto-agree-with-licenses']) - # add install parameter - if disable_recommends: - cmd.append('--no-recommends') - cmd.append(name) - rc, stdout, stderr = m.run_command(cmd, check_rc=False) - - if rc == 0: - changed=True - else: - changed=False - else: - rc = 0 - stdout = '' - stderr = '' - changed=False - - return (rc, stdout, stderr, changed) - -# Function used to make sure a package is the latest available version. -def package_latest(m, name, installed_state, disable_gpg_check, disable_recommends): - - if installed_state is True: - cmd = ['/usr/bin/zypper', '--non-interactive', 'update', '--auto-agree-with-licenses', name] - pre_upgrade_name = '' - post_upgrade_name = '' - - # Compare the installed package before and after to know if we changed anything. - pre_upgrade_name = get_current_name(m, name) - - rc, stdout, stderr = m.run_command(cmd, check_rc=False) - - post_upgrade_name = get_current_name(m, name) - - if pre_upgrade_name == post_upgrade_name: - changed = False - else: - changed = True - - return (rc, stdout, stderr, changed) - - else: - # If package was not installed at all just make it present. - return package_present(m, name, installed_state, disable_gpg_check, disable_recommends) - -# Function used to make sure a package is not installed. 
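# (The removal itself runs 'zypper --non-interactive remove' and is only
# attempted when the package is present, so repeated runs stay idempotent
# and report changed=False once the package is gone.)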
-def package_absent(m, name, installed_state): - if installed_state is True: - cmd = ['/usr/bin/zypper', '--non-interactive', 'remove', name] - rc, stdout, stderr = m.run_command(cmd) - - if rc == 0: - changed=True - else: - changed=False - else: - rc = 0 - stdout = '' - stderr = '' - changed=False - - return (rc, stdout, stderr, changed) - -# =========================================== -# Main control flow - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True, aliases=['pkg']), - state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']), - disable_gpg_check = dict(required=False, default='no', type='bool'), - disable_recommends = dict(required=False, default='yes', type='bool'), - ), - supports_check_mode = False - ) - - - params = module.params - - name = params['name'] - state = params['state'] - disable_gpg_check = params['disable_gpg_check'] - disable_recommends = params['disable_recommends'] - - rc = 0 - stdout = '' - stderr = '' - result = {} - result['name'] = name - result['state'] = state - - # Get package state - installed_state = get_package_state(module, name) - - # Perform requested action - if state in ['installed', 'present']: - (rc, stdout, stderr, changed) = package_present(module, name, installed_state, disable_gpg_check, disable_recommends) - elif state in ['absent', 'removed']: - (rc, stdout, stderr, changed) = package_absent(module, name, installed_state) - elif state == 'latest': - (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, disable_gpg_check, disable_recommends) - - if rc != 0: - if stderr: - module.fail_json(msg=stderr) - else: - module.fail_json(msg=stdout) - - result['changed'] = changed - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/zypper_repository b/library/packaging/zypper_repository deleted file mode 100644 index 1eb4ffdb343..00000000000 --- a/library/packaging/zypper_repository +++ /dev/null @@ -1,221 +0,0 @@ -#!/usr/bin/python -# encoding: utf-8 - -# (c) 2013, Matthias Vogelgesang -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -DOCUMENTATION = ''' ---- -module: zypper_repository -author: Matthias Vogelgesang -version_added: "1.4" -short_description: Add and remove Zypper repositories -description: - - Add or remove Zypper repositories on SUSE and openSUSE -options: - name: - required: false - default: none - description: - - A name for the repository. Not required when adding repofiles. - repo: - required: false - default: none - description: - - URI of the repository or .repo file. Required when state=present. - state: - required: false - choices: [ "absent", "present" ] - default: "present" - description: - - A source string state. 
- description: - required: false - default: none - description: - - A description of the repository - disable_gpg_check: - description: - - Whether to disable GPG signature checking of - all packages. Has an effect only if state is - I(present). - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] -notes: [] -requirements: [ zypper ] -''' - -EXAMPLES = ''' -# Add NVIDIA repository for graphics drivers -- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=present - -# Remove NVIDIA repository -- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=absent - -# Add python development repository -- zypper_repository: repo=http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo -''' -from xml.dom.minidom import parseString as parseXML - -REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck'] - - -def _parse_repos(module): - """parses the output of zypper -x lr and returns a parse repo dictionary""" - cmd = ['/usr/bin/zypper', '-x', 'lr'] - repos = [] - - rc, stdout, stderr = module.run_command(cmd, check_rc=True) - dom = parseXML(stdout) - repo_list = dom.getElementsByTagName('repo') - for repo in repo_list: - opts = {} - for o in REPO_OPTS: - opts[o] = repo.getAttribute(o) - opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data - # A repo can be uniquely identified by an alias + url - repos.append(opts) - - return repos - - -def repo_exists(module, **kwargs): - - def repo_subset(realrepo, repocmp): - for k in repocmp: - if k not in realrepo: - return False - - for k, v in realrepo.items(): - if k in repocmp: - if v.rstrip("/") != repocmp[k].rstrip("/"): - return False - return True - - repos = _parse_repos(module) - - for repo in repos: - if repo_subset(repo, kwargs): - return True - return False - - -def add_repo(module, repo, alias, description, disable_gpg_check): - cmd = ['/usr/bin/zypper', 'ar', '--check', '--refresh'] - - if description: - cmd.extend(['--name', description]) - - if disable_gpg_check: - cmd.append('--no-gpgcheck') - - cmd.append(repo) - - if not repo.endswith('.repo'): - cmd.append(alias) - - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - changed = rc == 0 - if rc == 0: - changed = True - elif 'already exists. 
Please use another alias' in stderr: - changed = False - else: - module.fail_json(msg=stderr if stderr else stdout) - - return changed - - -def remove_repo(module, repo, alias): - - cmd = ['/usr/bin/zypper', 'rr'] - if alias: - cmd.append(alias) - else: - cmd.append(repo) - - rc, stdout, stderr = module.run_command(cmd, check_rc=True) - changed = rc == 0 - return changed - - -def fail_if_rc_is_null(module, rc, stdout, stderr): - if rc != 0: - module.fail_json(msg=stderr if stderr else stdout) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=False), - repo=dict(required=False), - state=dict(choices=['present', 'absent'], default='present'), - description=dict(required=False), - disable_gpg_check = dict(required=False, default='no', type='bool'), - ), - supports_check_mode=False, - ) - - repo = module.params['repo'] - state = module.params['state'] - name = module.params['name'] - description = module.params['description'] - disable_gpg_check = module.params['disable_gpg_check'] - - def exit_unchanged(): - module.exit_json(changed=False, repo=repo, state=state, name=name) - - # Check run-time module parameters - if state == 'present' and not repo: - module.fail_json(msg='Module option state=present requires repo') - if state == 'absent' and not repo and not name: - module.fail_json(msg='Alias or repo parameter required when state=absent') - - if repo and repo.endswith('.repo'): - if name: - module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding repo files') - else: - if not name and state == "present": - module.fail_json(msg='Name required when adding non-repo files:') - - if repo and repo.endswith('.repo'): - exists = repo_exists(module, url=repo, alias=name) - elif repo: - exists = repo_exists(module, url=repo) - else: - exists = repo_exists(module, alias=name) - - if state == 'present': - if exists: - exit_unchanged() - - changed = add_repo(module, repo, name, description, disable_gpg_check) - elif state == 'absent': - if not exists: - exit_unchanged() - - changed = remove_repo(module, repo, name) - - module.exit_json(changed=changed, repo=repo, state=state) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/source_control/bzr b/library/source_control/bzr deleted file mode 100644 index 996150a39af..00000000000 --- a/library/source_control/bzr +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, André Paramés -# Based on the Git module by Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = u''' ---- -module: bzr -author: André Paramés -version_added: "1.1" -short_description: Deploy software (or files) from bzr branches -description: - - Manage I(bzr) branches to deploy files or software. -options: - name: - required: true - aliases: [ 'parent' ] - description: - - SSH or HTTP protocol address of the parent branch. 
- dest: - required: true - description: - - Absolute path of where the branch should be cloned to. - version: - required: false - default: "head" - description: - - What version of the branch to clone. This can be the - bzr revno or revid. - force: - required: false - default: "yes" - choices: [ 'yes', 'no' ] - description: - - If C(yes), any modified files in the working - tree will be discarded. - executable: - required: false - default: null - version_added: "1.4" - description: - - Path to bzr executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. -''' - -EXAMPLES = ''' -# Example bzr checkout from Ansible Playbooks -- bzr: name=bzr+ssh://foosball.example.org/path/to/branch dest=/srv/checkout version=22 -''' - -import re - - -class Bzr(object): - def __init__(self, module, parent, dest, version, bzr_path): - self.module = module - self.parent = parent - self.dest = dest - self.version = version - self.bzr_path = bzr_path - - def _command(self, args_list, cwd=None, **kwargs): - (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs) - return (rc, out, err) - - def get_version(self): - '''samples the version of the bzr branch''' - - cmd = "%s revno" % self.bzr_path - rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) - revno = stdout.strip() - return revno - - def clone(self): - '''makes a new bzr branch if it does not already exist''' - dest_dirname = os.path.dirname(self.dest) - try: - os.makedirs(dest_dirname) - except: - pass - if self.version.lower() != 'head': - args_list = ["branch", "-r", self.version, self.parent, self.dest] - else: - args_list = ["branch", self.parent, self.dest] - return self._command(args_list, check_rc=True, cwd=dest_dirname) - - def has_local_mods(self): - - cmd = "%s status -S" % self.bzr_path - rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) - lines = stdout.splitlines() - - lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) - return len(lines) > 0 - - def reset(self, force): - ''' - Resets the index and working tree to head. - Discards any changes to tracked files in the working - tree since that commit. 
- ''' - if not force and self.has_local_mods(): - self.module.fail_json(msg="Local modifications exist in branch (force=no).") - return self._command(["revert"], check_rc=True, cwd=self.dest) - - def fetch(self): - '''updates branch from remote sources''' - if self.version.lower() != 'head': - (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest) - else: - (rc, out, err) = self._command(["pull"], cwd=self.dest) - if rc != 0: - self.module.fail_json(msg="Failed to pull") - return (rc, out, err) - - def switch_version(self): - '''once pulled, switch to a particular revno or revid''' - if self.version.lower() != 'head': - args_list = ["revert", "-r", self.version] - else: - args_list = ["revert"] - return self._command(args_list, check_rc=True, cwd=self.dest) - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec = dict( - dest=dict(required=True), - name=dict(required=True, aliases=['parent']), - version=dict(default='head'), - force=dict(default='yes', type='bool'), - executable=dict(default=None), - ) - ) - - dest = os.path.abspath(os.path.expanduser(module.params['dest'])) - parent = module.params['name'] - version = module.params['version'] - force = module.params['force'] - bzr_path = module.params['executable'] or module.get_bin_path('bzr', True) - - bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf') - - rc, out, err, status = (0, None, None, None) - - bzr = Bzr(module, parent, dest, version, bzr_path) - - # if there is no bzr configuration, do a branch operation - # else pull and switch the version - before = None - local_mods = False - if not os.path.exists(bzrconfig): - (rc, out, err) = bzr.clone() - - else: - # else do a pull - local_mods = bzr.has_local_mods() - before = bzr.get_version() - (rc, out, err) = bzr.reset(force) - if rc != 0: - module.fail_json(msg=err) - (rc, out, err) = bzr.fetch() - if rc != 0: - module.fail_json(msg=err) - - # switch to version specified regardless of whether - # we cloned or pulled - (rc, out, err) = bzr.switch_version() - - # determine if we changed anything - after = bzr.get_version() - changed = False - - if before != after or local_mods: - changed = True - - module.exit_json(changed=changed, before=before, after=after) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/source_control/git b/library/source_control/git deleted file mode 100644 index 15ca0fd07a5..00000000000 --- a/library/source_control/git +++ /dev/null @@ -1,600 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: git -author: Michael DeHaan -version_added: "0.0.1" -short_description: Deploy software (or files) from git checkouts -description: - - Manage I(git) checkouts of repositories to deploy files or software. 
-options: - repo: - required: true - aliases: [ name ] - description: - - git, SSH, or HTTP protocol address of the git repository. - dest: - required: true - description: - - Absolute path of where the repository should be checked out to. - version: - required: false - default: "HEAD" - description: - - What version of the repository to check out. This can be the - full 40-character I(SHA-1) hash, the literal string C(HEAD), a - branch name, or a tag name. - accept_hostkey: - required: false - default: "no" - choices: [ "yes", "no" ] - version_added: "1.5" - description: - - if C(yes), adds the hostkey for the repo url if not already - added. If ssh_args contains "-o StrictHostKeyChecking=no", - this parameter is ignored. - ssh_opts: - required: false - default: None - version_added: "1.5" - description: - - Creates a wrapper script and exports the path as GIT_SSH - which git then automatically uses to override ssh arguments. - An example value could be "-o StrictHostKeyChecking=no" - key_file: - required: false - default: None - version_added: "1.5" - description: - - Uses the same wrapper method as ssh_opts to pass - "-i " to the ssh arguments used by git - reference: - required: false - default: null - version_added: "1.4" - description: - - Reference repository (see "git clone --reference ...") - remote: - required: false - default: "origin" - description: - - Name of the remote. - force: - required: false - default: "yes" - choices: [ "yes", "no" ] - version_added: "0.7" - description: - - If C(yes), any modified files in the working - repository will be discarded. Prior to 0.7, this was always - 'yes' and could not be disabled. - depth: - required: false - default: null - version_added: "1.2" - description: - - Create a shallow clone with a history truncated to the specified - number or revisions. The minimum possible value is C(1), otherwise - ignored. - update: - required: false - default: "yes" - choices: [ "yes", "no" ] - version_added: "1.2" - description: - - If C(no), just returns information about the repository without updating. - executable: - required: false - default: null - version_added: "1.4" - description: - - Path to git executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. - bare: - required: false - default: "no" - choices: [ "yes", "no" ] - version_added: "1.4" - description: - - if C(yes), repository will be created as a bare repo, otherwise - it will be a standard repo with a workspace. - - recursive: - required: false - default: "yes" - choices: [ "yes", "no" ] - version_added: "1.6" - description: - - if C(no), repository will be cloned without the --recursive - option, skipping sub-modules. -notes: - - "If the task seems to be hanging, first verify remote host is in C(known_hosts). - SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, - one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling - the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts." 
-''' - -EXAMPLES = ''' -# Example git checkout from Ansible Playbooks -- git: repo=git://foosball.example.org/path/to/repo.git - dest=/srv/checkout - version=release-0.22 - -# Example read-write git checkout from github -- git: repo=ssh://git@github.com/mylogin/hello.git dest=/home/mylogin/hello - -# Example just ensuring the repo checkout exists -- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout update=no -''' - -import re -import tempfile - -def get_submodule_update_params(module, git_path, cwd): - - #or: git submodule [--quiet] update [--init] [-N|--no-fetch] - #[-f|--force] [--rebase] [--reference ] [--merge] - #[--recursive] [--] [...] - - params = [] - - # run a bad submodule command to get valid params - cmd = "%s submodule update --help" % (git_path) - rc, stdout, stderr = module.run_command(cmd, cwd=cwd) - lines = stderr.split('\n') - update_line = None - for line in lines: - if 'git submodule [--quiet] update ' in line: - update_line = line - if update_line: - update_line = update_line.replace('[','') - update_line = update_line.replace(']','') - update_line = update_line.replace('|',' ') - parts = shlex.split(update_line) - for part in parts: - if part.startswith('--'): - part = part.replace('--', '') - params.append(part) - - return params - -def write_ssh_wrapper(): - module_dir = get_module_path() - try: - # make sure we have full permission to the module_dir, which - # may not be the case if we're sudo'ing to a non-root user - if os.access(module_dir, os.W_OK|os.R_OK|os.X_OK): - fd, wrapper_path = tempfile.mkstemp(prefix=module_dir + '/') - else: - raise OSError - except (IOError, OSError): - fd, wrapper_path = tempfile.mkstemp() - fh = os.fdopen(fd, 'w+b') - template = """#!/bin/sh -if [ -z "$GIT_SSH_OPTS" ]; then - BASEOPTS="" -else - BASEOPTS=$GIT_SSH_OPTS -fi - -if [ -z "$GIT_KEY" ]; then - ssh $BASEOPTS "$@" -else - ssh -i "$GIT_KEY" $BASEOPTS "$@" -fi -""" - fh.write(template) - fh.close() - st = os.stat(wrapper_path) - os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC) - return wrapper_path - -def set_git_ssh(ssh_wrapper, key_file, ssh_opts): - - if os.environ.get("GIT_SSH"): - del os.environ["GIT_SSH"] - os.environ["GIT_SSH"] = ssh_wrapper - - if os.environ.get("GIT_KEY"): - del os.environ["GIT_KEY"] - - if key_file: - os.environ["GIT_KEY"] = key_file - - if os.environ.get("GIT_SSH_OPTS"): - del os.environ["GIT_SSH_OPTS"] - - if ssh_opts: - os.environ["GIT_SSH_OPTS"] = ssh_opts - -def get_version(module, git_path, dest, ref="HEAD"): - ''' samples the version of the git repo ''' - - cmd = "%s rev-parse %s" % (git_path, ref) - rc, stdout, stderr = module.run_command(cmd, cwd=dest) - sha = stdout.rstrip('\n') - return sha - -def clone(git_path, module, repo, dest, remote, depth, version, bare, - reference, recursive): - ''' makes a new git repo if it does not already exist ''' - dest_dirname = os.path.dirname(dest) - try: - os.makedirs(dest_dirname) - except: - pass - cmd = [ git_path, 'clone' ] - if bare: - cmd.append('--bare') - else: - cmd.extend([ '--origin', remote ]) - if recursive: - cmd.extend([ '--recursive' ]) - if is_remote_branch(git_path, module, dest, repo, version) \ - or is_remote_tag(git_path, module, dest, repo, version): - cmd.extend([ '--branch', version ]) - if depth: - cmd.extend([ '--depth', str(depth) ]) - if reference: - cmd.extend([ '--reference', str(reference) ]) - cmd.extend([ repo, dest ]) - module.run_command(cmd, check_rc=True, cwd=dest_dirname) - if bare: - if remote != 'origin': - module.run_command([git_path, 
'remote', 'add', remote, repo], check_rc=True, cwd=dest) - -def has_local_mods(module, git_path, dest, bare): - if bare: - return False - - cmd = "%s status -s" % (git_path) - rc, stdout, stderr = module.run_command(cmd, cwd=dest) - lines = stdout.splitlines() - lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) - - return len(lines) > 0 - -def reset(git_path, module, dest): - ''' - Resets the index and working tree to HEAD. - Discards any changes to tracked files in working - tree since that commit. - ''' - cmd = "%s reset --hard HEAD" % (git_path,) - return module.run_command(cmd, check_rc=True, cwd=dest) - -def get_remote_head(git_path, module, dest, version, remote, bare): - cloning = False - cwd = None - if remote == module.params['repo']: - cloning = True - else: - cwd = dest - if version == 'HEAD': - if cloning: - # cloning the repo, just get the remote's HEAD version - cmd = '%s ls-remote %s -h HEAD' % (git_path, remote) - else: - head_branch = get_head_branch(git_path, module, dest, remote, bare) - cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, head_branch) - elif is_remote_branch(git_path, module, dest, remote, version): - cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version) - elif is_remote_tag(git_path, module, dest, remote, version): - cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version) - else: - # appears to be a sha1. return as-is since it appears - # cannot check for a specific sha1 on remote - return version - (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd) - if len(out) < 1: - module.fail_json(msg="Could not determine remote revision for %s" % version) - rev = out.split()[0] - return rev - -def is_remote_tag(git_path, module, dest, remote, version): - cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version) - (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) - if version in out: - return True - else: - return False - -def get_branches(git_path, module, dest): - branches = [] - cmd = '%s branch -a' % (git_path,) - (rc, out, err) = module.run_command(cmd, cwd=dest) - if rc != 0: - module.fail_json(msg="Could not determine branch data - received %s" % out) - for line in out.split('\n'): - branches.append(line.strip()) - return branches - -def get_tags(git_path, module, dest): - tags = [] - cmd = '%s tag' % (git_path,) - (rc, out, err) = module.run_command(cmd, cwd=dest) - if rc != 0: - module.fail_json(msg="Could not determine tag data - received %s" % out) - for line in out.split('\n'): - tags.append(line.strip()) - return tags - -def is_remote_branch(git_path, module, dest, remote, version): - cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version) - (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) - if version in out: - return True - else: - return False - -def is_local_branch(git_path, module, dest, branch): - branches = get_branches(git_path, module, dest) - lbranch = '%s' % branch - if lbranch in branches: - return True - elif '* %s' % branch in branches: - return True - else: - return False - -def is_not_a_branch(git_path, module, dest): - branches = get_branches(git_path, module, dest) - for b in branches: - if b.startswith('* ') and 'no branch' in b: - return True - return False - -def get_head_branch(git_path, module, dest, remote, bare=False): - ''' - Determine what branch HEAD is associated with. This is partly - taken from lib/ansible/utils/__init__.py. 
It finds the correct - path to .git/HEAD and reads from that file the branch that HEAD is - associated with. In the case of a detached HEAD, this will look - up the branch in .git/refs/remotes/<remote>/HEAD. - ''' - if bare: - repo_path = dest - else: - repo_path = os.path.join(dest, '.git') - # Check if the .git is a file. If it is a file, it means that we are in a submodule structure. - if os.path.isfile(repo_path): - try: - gitdir = yaml.safe_load(open(repo_path)).get('gitdir') - # There is a possibility that the .git file has an absolute path. - if os.path.isabs(gitdir): - repo_path = gitdir - else: - repo_path = os.path.join(repo_path.split('.git')[0], gitdir) - except (IOError, AttributeError): - return '' - # Read .git/HEAD for the name of the branch. - # If we're in a detached HEAD state, look up the branch associated with - # the remote HEAD in .git/refs/remotes/<remote>/HEAD - f = open(os.path.join(repo_path, "HEAD")) - if is_not_a_branch(git_path, module, dest): - f.close() - f = open(os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD')) - branch = f.readline().split('/')[-1].rstrip("\n") - f.close() - return branch - -def fetch(git_path, module, repo, dest, version, remote, bare): - ''' updates repo from remote sources ''' - (rc, out0, err0) = module.run_command([git_path, 'remote', 'set-url', remote, repo], cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to set a new url %s for %s: %s" % (repo, remote, out0 + err0)) - if bare: - (rc, out1, err1) = module.run_command([git_path, 'fetch', remote, '+refs/heads/*:refs/heads/*'], cwd=dest) - else: - (rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote), cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to download remote objects and refs") - - if bare: - (rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*'], cwd=dest) - else: - (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to download remote objects and refs") - (rc, out3, err3) = submodule_update(git_path, module, dest) - return (rc, out1 + out2 + out3, err1 + err2 + err3) - -def submodule_update(git_path, module, dest): - ''' init and update any submodules ''' - - # get the valid submodule params - params = get_submodule_update_params(module, git_path, dest) - - # skip submodule commands if .gitmodules is not present - if not os.path.exists(os.path.join(dest, '.gitmodules')): - return (0, '', '') - cmd = [ git_path, 'submodule', 'sync' ] - (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) - if 'remote' in params: - cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ,'--remote' ] - else: - cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ] - (rc, out, err) = module.run_command(cmd, cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to init/update submodules: %s" % out + err) - return (rc, out, err) - -def switch_version(git_path, module, dest, remote, version, recursive): - ''' once pulled, switch to a particular SHA, tag, or branch ''' - cmd = '' - if version != 'HEAD': - if is_remote_branch(git_path, module, dest, remote, version): - if not is_local_branch(git_path, module, dest, version): - cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version) - else: - (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to checkout branch %s" % version) - cmd = "%s reset --hard %s/%s" %
(git_path, remote, version) - else: - cmd = "%s checkout --force %s" % (git_path, version) - else: - branch = get_head_branch(git_path, module, dest, remote) - (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to checkout branch %s" % branch) - cmd = "%s reset --hard %s" % (git_path, remote) - (rc, out1, err1) = module.run_command(cmd, cwd=dest) - if rc != 0: - if version != 'HEAD': - module.fail_json(msg="Failed to checkout %s" % (version)) - else: - module.fail_json(msg="Failed to checkout branch %s" % (branch)) - if recursive: - (rc, out2, err2) = submodule_update(git_path, module, dest) - out1 += out2 - err1 += err2 - return (rc, out1, err1) - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec = dict( - dest=dict(required=True), - repo=dict(required=True, aliases=['name']), - version=dict(default='HEAD'), - remote=dict(default='origin'), - reference=dict(default=None), - force=dict(default='yes', type='bool'), - depth=dict(default=None, type='int'), - update=dict(default='yes', type='bool'), - accept_hostkey=dict(default='no', type='bool'), - key_file=dict(default=None, required=False), - ssh_opts=dict(default=None, required=False), - executable=dict(default=None), - bare=dict(default='no', type='bool'), - recursive=dict(default='yes', type='bool'), - ), - supports_check_mode=True - ) - - dest = os.path.abspath(os.path.expanduser(module.params['dest'])) - repo = module.params['repo'] - version = module.params['version'] - remote = module.params['remote'] - force = module.params['force'] - depth = module.params['depth'] - update = module.params['update'] - bare = module.params['bare'] - reference = module.params['reference'] - git_path = module.params['executable'] or module.get_bin_path('git', True) - - key_file = module.params['key_file'] - ssh_opts = module.params['ssh_opts'] - - # create a wrapper script and export - # GIT_SSH=<path> as an environment variable - # for git to use the wrapper script - ssh_wrapper = None - if key_file or ssh_opts: - ssh_wrapper = write_ssh_wrapper() - set_git_ssh(ssh_wrapper, key_file, ssh_opts) - module.add_cleanup_file(path=ssh_wrapper) - - # add the git repo's hostkey - if module.params['ssh_opts'] is not None: - if not "-o StrictHostKeyChecking=no" in module.params['ssh_opts']: - add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) - else: - add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) - - recursive = module.params['recursive'] - - if bare: - gitconfig = os.path.join(dest, 'config') - else: - gitconfig = os.path.join(dest, '.git', 'config') - - rc, out, err, status = (0, None, None, None) - - # if there is no git configuration, do a clone operation - # else pull and switch the version - before = None - local_mods = False - if not os.path.exists(gitconfig): - if module.check_mode: - remote_head = get_remote_head(git_path, module, dest, version, repo, bare) - module.exit_json(changed=True, before=before, after=remote_head) - clone(git_path, module, repo, dest, remote, depth, version, bare, - reference, recursive) - elif not update: - # Just return having found a repo already in the dest path - # this does no checking that the repo is the actual repo - # requested.
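# (With update=no the module only samples the existing checkout: the same
# SHA is reported as both 'before' and 'after', and changed is always False.)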
- before = get_version(module, git_path, dest) - module.exit_json(changed=False, before=before, after=before) - else: - # else do a pull - local_mods = has_local_mods(module, git_path, dest, bare) - before = get_version(module, git_path, dest) - if local_mods: - # failure should happen regardless of check mode - if not force: - module.fail_json(msg="Local modifications exist in repository (force=no).") - # if force and in non-check mode, do a reset - if not module.check_mode: - reset(git_path, module, dest) - # exit if already at desired sha version - remote_head = get_remote_head(git_path, module, dest, version, remote, bare) - if before == remote_head: - if local_mods: - module.exit_json(changed=True, before=before, after=remote_head, - msg="Local modifications exist") - elif is_remote_tag(git_path, module, dest, repo, version): - # if the remote is a tag and we have the tag locally, exit early - if version in get_tags(git_path, module, dest): - module.exit_json(changed=False, before=before, after=remote_head) - else: - module.exit_json(changed=False, before=before, after=remote_head) - if module.check_mode: - module.exit_json(changed=True, before=before, after=remote_head) - fetch(git_path, module, repo, dest, version, remote, bare) - - # switch to version specified regardless of whether - # we cloned or pulled - if not bare: - switch_version(git_path, module, dest, remote, version, recursive) - - # determine if we changed anything - after = get_version(module, git_path, dest) - changed = False - - if before != after or local_mods: - changed = True - - # cleanup the wrapper script - if ssh_wrapper: - os.remove(ssh_wrapper) - - module.exit_json(changed=changed, before=before, after=after) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.known_hosts import * - -main() diff --git a/library/source_control/github_hooks b/library/source_control/github_hooks deleted file mode 100644 index 6a8d1ced935..00000000000 --- a/library/source_control/github_hooks +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Phillip Gentry -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. - -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import json -import base64 - -DOCUMENTATION = ''' ---- -module: github_hooks -short_description: Manages github service hooks. -description: - - Adds service hooks and removes service hooks that have an error status. -version_added: "1.4" -options: - user: - description: - - Github username. - required: true - oauthkey: - description: - - The oauth key provided by github. It can be found/generated on github under "Edit Your Profile" >> "Applications" >> "Personal Access Tokens" - required: true - repo: - description: - - "This is the API url for the repository you want to manage hooks for. It should be in the form of: https://api.github.com/repos/user:/repo:. Note this is different than the normal repo url." 
- required: true - hookurl: - description: - - When creating a new hook, this is the url that you want github to post to. It is only required when creating a new hook. - required: false - action: - description: - - This tells the githooks module what you want it to do. - required: true - choices: [ "create", "cleanall" ] - validate_certs: - description: - - If C(no), SSL certificates for the target repo will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - -author: Phillip Gentry, CX Inc -''' - -EXAMPLES = ''' -# Example creating a new service hook. It ignores duplicates. -- github_hooks: action=create hookurl=http://11.111.111.111:2222 user={{ gituser }} oauthkey={{ oauthkey }} repo=https://api.github.com/repos/pcgentry/Github-Auto-Deploy - -# Cleaning all hooks for this repo that had an error on the last update. Since this works for all hooks in a repo it is probably best that this would be called from a handler. -- local_action: github_hooks action=cleanall user={{ gituser }} oauthkey={{ oauthkey }} repo={{ repo }} -''' - -def list(module, hookurl, oauthkey, repo, user): - url = "%s/hooks" % repo - auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '') - headers = { - 'Authorization': 'Basic %s' % auth, - } - response, info = fetch_url(module, url, headers=headers) - if info['status'] != 200: - return False, '' - else: - return False, response.read() - -def clean504(module, hookurl, oauthkey, repo, user): - current_hooks = list(hookurl, oauthkey, repo, user)[1] - decoded = json.loads(current_hooks) - - for hook in decoded: - if hook['last_response']['code'] == 504: - # print "Last response was an ERROR for hook:" - # print hook['id'] - delete(module, hookurl, oauthkey, repo, user, hook['id']) - - return 0, current_hooks - -def cleanall(module, hookurl, oauthkey, repo, user): - current_hooks = list(hookurl, oauthkey, repo, user)[1] - decoded = json.loads(current_hooks) - - for hook in decoded: - if hook['last_response']['code'] != 200: - # print "Last response was an ERROR for hook:" - # print hook['id'] - delete(module, hookurl, oauthkey, repo, user, hook['id']) - - return 0, current_hooks - -def create(module, hookurl, oauthkey, repo, user): - url = "%s/hooks" % repo - values = { - "active": True, - "name": "web", - "config": { - "url": "%s" % hookurl, - "content_type": "json" - } - } - data = json.dumps(values) - auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '') - headers = { - 'Authorization': 'Basic %s' % auth, - } - response, info = fetch_url(module, url, data=data, headers=headers) - if info['status'] != 200: - return 0, '[]' - else: - return 0, response.read() - -def delete(module, hookurl, oauthkey, repo, user, hookid): - url = "%s/hooks/%s" % (repo, hookid) - auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '') - headers = { - 'Authorization': 'Basic %s' % auth, - } - response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE') - return response.read() - -def main(): - module = AnsibleModule( - argument_spec=dict( - action=dict(required=True), - hookurl=dict(required=False), - oauthkey=dict(required=True), - repo=dict(required=True), - user=dict(required=True), - validate_certs=dict(default='yes', type='bool'), - ) - ) - - action = module.params['action'] - hookurl = module.params['hookurl'] - oauthkey = module.params['oauthkey'] - repo = module.params['repo'] - user = 
module.params['user'] - - if action == "list": - (rc, out) = list(module, hookurl, oauthkey, repo, user) - - if action == "clean504": - (rc, out) = clean504(module, hookurl, oauthkey, repo, user) - - if action == "cleanall": - (rc, out) = cleanall(module, hookurl, oauthkey, repo, user) - - if action == "create": - (rc, out) = create(module, hookurl, oauthkey, repo, user) - - if rc != 0: - module.fail_json(msg="failed", result=out) - - module.exit_json(msg="success", result=out) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/source_control/hg b/library/source_control/hg deleted file mode 100644 index 1b95bcd5ac3..00000000000 --- a/library/source_control/hg +++ /dev/null @@ -1,238 +0,0 @@ -#!/usr/bin/python -#-*- coding: utf-8 -*- - -# (c) 2013, Yeukhon Wong -# -# This module was originally inspired by Brad Olson's ansible-module-mercurial -# . This module tends -# to follow the git module implementation. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import ConfigParser - -DOCUMENTATION = ''' ---- -module: hg -short_description: Manages Mercurial (hg) repositories. -description: - - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address. -version_added: "1.0" -author: Yeukhon Wong -options: - repo: - description: - - The repository address. - required: true - default: null - aliases: [ name ] - dest: - description: - - Absolute path of where the repository should be cloned to. - required: true - default: null - revision: - description: - - Equivalent C(-r) option in hg command which could be the changeset, revision number, - branch name or even tag. - required: false - default: "default" - aliases: [ version ] - force: - description: - - Discards uncommitted changes. Runs C(hg update -C). - required: false - default: "yes" - choices: [ "yes", "no" ] - purge: - description: - - Deletes untracked files. Runs C(hg purge). - required: false - default: "no" - choices: [ "yes", "no" ] - executable: - required: false - default: null - version_added: "1.4" - description: - - Path to hg executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. -notes: - - "If the task seems to be hanging, first verify remote host is in C(known_hosts). - SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, - one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling - the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts." -requirements: [ ] -''' - -EXAMPLES = ''' -# Ensure the current working copy is inside the stable branch and deletes untracked files if any. 
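# (force defaults to yes here, so uncommitted changes are discarded with
# "hg update -C"; purge=yes additionally deletes untracked files via "hg purge".)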
-- hg: repo=https://bitbucket.org/user/repo1 dest=/home/user/repo1 revision=stable purge=yes - ''' - -class Hg(object): - - def __init__(self, module, dest, repo, revision, hg_path): - self.module = module - self.dest = dest - self.repo = repo - self.revision = revision - self.hg_path = hg_path - - def _command(self, args_list): - (rc, out, err) = self.module.run_command([self.hg_path] + args_list) - return (rc, out, err) - - def _list_untracked(self): - args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print'] - return self._command(args) - - def get_revision(self): - """ - hg id -b -i -t returns a string in the format: - "<changeset>[+] <branch_name> <tag>" - This format lists the state of the current working copy, - and indicates whether there are uncommitted changes by the - plus sign. Otherwise, the sign is omitted. - - Read the full description via hg id --help - """ - (rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest]) - if rc != 0: - self.module.fail_json(msg=err) - else: - return out.strip('\n') - - def has_local_mods(self): - now = self.get_revision() - if '+' in now: - return True - else: - return False - - def discard(self): - before = self.has_local_mods() - if not before: - return False - - (rc, out, err) = self._command(['update', '-C', '-R', self.dest]) - if rc != 0: - self.module.fail_json(msg=err) - - after = self.has_local_mods() - if before != after and not after: # no more local modification - return True - - def purge(self): - # before purge, find out if there are any untracked files - (rc1, out1, err1) = self._list_untracked() - if rc1 != 0: - self.module.fail_json(msg=err1) - - # there are some untracked files - if out1 != '': - args = ['purge', '--config', 'extensions.purge=', '-R', self.dest] - (rc2, out2, err2) = self._command(args) - if rc2 != 0: - self.module.fail_json(msg=err2) - return True - else: - return False - - def cleanup(self, force, purge): - discarded = False - purged = False - - if force: - discarded = self.discard() - if purge: - purged = self.purge() - if discarded or purged: - return True - else: - return False - - def pull(self): - return self._command( - ['pull', '-R', self.dest, self.repo]) - - def update(self): - return self._command(['update', '-R', self.dest]) - - def clone(self): - return self._command(['clone', self.repo, self.dest, '-r', self.revision]) - - def switch_version(self): - return self._command(['update', '-r', self.revision, '-R', self.dest]) - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec = dict( - repo = dict(required=True, aliases=['name']), - dest = dict(required=True), - revision = dict(default="default", aliases=['version']), - force = dict(default='yes', type='bool'), - purge = dict(default='no', type='bool'), - executable = dict(default=None), - ), - ) - repo = module.params['repo'] - dest = os.path.expanduser(module.params['dest']) - revision = module.params['revision'] - force = module.params['force'] - purge = module.params['purge'] - hg_path = module.params['executable'] or module.get_bin_path('hg', True) - hgrc = os.path.join(dest, '.hg/hgrc') - - # initial states - before = '' - changed = False - cleaned = False - - hg = Hg(module, dest, repo, revision, hg_path) - - # If there is no hgrc file, then assume repo is absent - # and perform clone. Otherwise, perform pull and update.
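# (.hg/hgrc is used as the clone marker: "hg clone" records the source URL in
# it, while a tree created with a bare "hg init" has none and would be treated
# as absent and re-cloned.)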
- if not os.path.exists(hgrc): - (rc, out, err) = hg.clone() - if rc != 0: - module.fail_json(msg=err) - else: - # get the current state before doing pulling - before = hg.get_revision() - - # can perform force and purge - cleaned = hg.cleanup(force, purge) - - (rc, out, err) = hg.pull() - if rc != 0: - module.fail_json(msg=err) - - (rc, out, err) = hg.update() - if rc != 0: - module.fail_json(msg=err) - - hg.switch_version() - after = hg.get_revision() - if before != after or cleaned: - changed = True - module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/source_control/subversion b/library/source_control/subversion deleted file mode 100644 index 6709a8c3939..00000000000 --- a/library/source_control/subversion +++ /dev/null @@ -1,231 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: subversion -short_description: Deploys a subversion repository. -description: - - Deploy given repository URL / revision to dest. If dest exists, update to the specified revision, otherwise perform a checkout. -version_added: "0.7" -author: Dane Summers, njharman@gmail.com -notes: - - Requires I(svn) to be installed on the client. -requirements: [] -options: - repo: - description: - - The subversion URL to the repository. - required: true - aliases: [ name, repository ] - default: null - dest: - description: - - Absolute path where the repository should be deployed. - required: true - default: null - revision: - description: - - Specific revision to checkout. - required: false - default: HEAD - aliases: [ version ] - force: - description: - - If C(yes), modified files will be discarded. If C(no), module will fail if it encounters modified files. - required: false - default: "yes" - choices: [ "yes", "no" ] - username: - description: - - --username parameter passed to svn. - required: false - default: null - password: - description: - - --password parameter passed to svn. - required: false - default: null - executable: - required: false - default: null - version_added: "1.4" - description: - - Path to svn executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. - export: - required: false - default: "no" - choices: [ "yes", "no" ] - version_added: "1.6" - description: - - If C(yes), do export instead of checkout/update. -''' - -EXAMPLES = ''' -# Checkout subversion repository to specified folder. 
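# (revision defaults to HEAD, and force defaults to yes, so locally modified
# files in an existing working copy are reverted before the update.)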
-- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/checkout - -# Export subversion directory to folder -- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/export export=True -''' - -import re -import tempfile - - -class Subversion(object): - def __init__( - self, module, dest, repo, revision, username, password, svn_path): - self.module = module - self.dest = dest - self.repo = repo - self.revision = revision - self.username = username - self.password = password - self.svn_path = svn_path - - def _exec(self, args): - bits = [ - self.svn_path, - '--non-interactive', - '--trust-server-cert', - '--no-auth-cache', - ] - if self.username: - bits.extend(["--username", self.username]) - if self.password: - bits.extend(["--password", self.password]) - bits.extend(args) - rc, out, err = self.module.run_command(bits, check_rc=True) - return out.splitlines() - - def checkout(self): - '''Creates new svn working directory if it does not already exist.''' - self._exec(["checkout", "-r", self.revision, self.repo, self.dest]) - - def export(self, force=False): - '''Export svn repo to directory''' - self._exec(["export", "-r", self.revision, self.repo, self.dest]) - - def switch(self): - '''Change working directory's repo.''' - # switch to ensure we are pointing at correct repo. - self._exec(["switch", self.repo, self.dest]) - - def update(self): - '''Update existing svn working directory.''' - self._exec(["update", "-r", self.revision, self.dest]) - - def revert(self): - '''Revert svn working directory.''' - self._exec(["revert", "-R", self.dest]) - - def get_revision(self): - '''Revision and URL of subversion working directory.''' - text = '\n'.join(self._exec(["info", self.dest])) - rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0) - url = re.search(r'^URL:.*$', text, re.MULTILINE).group(0) - return rev, url - - def has_local_mods(self): - '''True if revisioned files have been added or modified. Unrevisioned files are ignored.''' - lines = self._exec(["status", self.dest]) - # Match only revisioned files, i.e. ignore status '?'. - regex = re.compile(r'^[^?]') - # Has local mods if more than 0 modifed revisioned files. 
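# ('svn status' marks unversioned files with '?' in the first column, so
# matching any other leading character counts only versioned changes such as
# 'M' or 'A'.)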
- return len(filter(regex.match, lines)) > 0 - - def needs_update(self): - curr, url = self.get_revision() - out2 = '\n'.join(self._exec(["info", "-r", "HEAD", self.dest])) - head = re.search(r'^Revision:.*$', out2, re.MULTILINE).group(0) - rev1 = int(curr.split(':')[1].strip()) - rev2 = int(head.split(':')[1].strip()) - change = False - if rev1 < rev2: - change = True - return change, curr, head - - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec=dict( - dest=dict(required=True), - repo=dict(required=True, aliases=['name', 'repository']), - revision=dict(default='HEAD', aliases=['rev', 'version']), - force=dict(default='yes', type='bool'), - username=dict(required=False), - password=dict(required=False), - executable=dict(default=None), - export=dict(default=False, required=False, type='bool'), - ), - supports_check_mode=True - ) - - dest = os.path.expanduser(module.params['dest']) - repo = module.params['repo'] - revision = module.params['revision'] - force = module.params['force'] - username = module.params['username'] - password = module.params['password'] - svn_path = module.params['executable'] or module.get_bin_path('svn', True) - export = module.params['export'] - - os.environ['LANG'] = 'C' - svn = Subversion(module, dest, repo, revision, username, password, svn_path) - - if not os.path.exists(dest): - before = None - local_mods = False - if module.check_mode: - module.exit_json(changed=True) - if not export: - svn.checkout() - else: - svn.export() - elif os.path.exists("%s/.svn" % (dest, )): - # Order matters. Need to get local mods before switch to avoid false - # positives. Need to switch before revert to ensure we are reverting to - # correct repo. - if module.check_mode: - check, before, after = svn.needs_update() - module.exit_json(changed=check, before=before, after=after) - before = svn.get_revision() - local_mods = svn.has_local_mods() - svn.switch() - if local_mods: - if force: - svn.revert() - else: - module.fail_json(msg="ERROR: modified files exist in the repository.") - svn.update() - else: - module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." % (dest, )) - - after = svn.get_revision() - changed = before != after or local_mods - module.exit_json(changed=changed, before=before, after=after) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/alternatives b/library/system/alternatives deleted file mode 100755 index b80ffab944c..00000000000 --- a/library/system/alternatives +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to manage symbolic link alternatives. -(c) 2014, Gabe Mulley - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . 
-""" - -DOCUMENTATION = ''' ---- -module: alternatives -short_description: Manages alternative programs for common commands -description: - - Manages symbolic links using the 'update-alternatives' tool provided on debian-like systems. - - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). -version_added: "1.6" -options: - name: - description: - - The generic name of the link. - required: true - path: - description: - - The path to the real executable that the link should point to. - required: true - link: - description: - - The path to the symbolic link that should point to the real executable. - required: false -requirements: [ update-alternatives ] -''' - -EXAMPLES = ''' -- name: correct java version selected - alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - -- name: alternatives link created - alternatives: name=hadoop-conf link=/etc/hadoop/conf path=/etc/hadoop/conf.ansible -''' - -DEFAULT_LINK_PRIORITY = 50 - -def main(): - - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - path = dict(required=True), - link = dict(required=False), - ) - ) - - params = module.params - name = params['name'] - path = params['path'] - link = params['link'] - - UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True) - - current_path = None - all_alternatives = [] - - (rc, query_output, query_error) = module.run_command( - [UPDATE_ALTERNATIVES, '--query', name] - ) - - # Gather the current setting and all alternatives from the query output. - # Query output should look something like this: - - # Name: java - # Link: /usr/bin/java - # Slaves: - # java.1.gz /usr/share/man/man1/java.1.gz - # Status: manual - # Best: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - # Value: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java - - # Alternative: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java - # Priority: 1061 - # Slaves: - # java.1.gz /usr/lib/jvm/java-6-openjdk-amd64/jre/man/man1/java.1.gz - - # Alternative: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - # Priority: 1071 - # Slaves: - # java.1.gz /usr/lib/jvm/java-7-openjdk-amd64/jre/man/man1/java.1.gz - - if rc == 0: - for line in query_output.splitlines(): - split_line = line.split(':') - if len(split_line) == 2: - key = split_line[0] - value = split_line[1].strip() - if key == 'Value': - current_path = value - elif key == 'Alternative': - all_alternatives.append(value) - elif key == 'Link' and not link: - link = value - - if current_path != path: - try: - # install the requested path if necessary - if path not in all_alternatives: - module.run_command( - [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)], - check_rc=True - ) - - # select the requested path - module.run_command( - [UPDATE_ALTERNATIVES, '--set', name, path], - check_rc=True - ) - - module.exit_json(changed=True) - except subprocess.CalledProcessError, cpe: - module.fail_json(msg=str(dir(cpe))) - else: - module.exit_json(changed=False) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/at b/library/system/at deleted file mode 100644 index c63527563fd..00000000000 --- a/library/system/at +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2014, Richard Isaacson -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free 
Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: at -short_description: Schedule the execution of a command or script file via the at command. -description: - - Use this module to schedule a command or script file to run once in the future. - - All jobs are executed in the 'a' queue. -version_added: "1.5" -options: - command: - description: - - A command to be executed in the future. - required: false - default: null - script_file: - description: - - An existing script file to be executed in the future. - required: false - default: null - count: - description: - - The count of units in the future to execute the command or script file. - required: true - units: - description: - - The type of units in the future to execute the command or script file. - required: true - choices: ["minutes", "hours", "days", "weeks"] - state: - description: - - The state dictates if the command or script file should be evaluated as present(added) or absent(deleted). - required: false - choices: ["present", "absent"] - default: "present" - unique: - description: - - If a matching job is present a new job will not be added. - required: false - default: false -requirements: - - at -author: Richard Isaacson -''' - -EXAMPLES = ''' -# Schedule a command to execute in 20 minutes as root. -- at: command="ls -d / > /dev/null" count=20 units="minutes" - -# Match a command to an existing job and delete the job. -- at: command="ls -d / > /dev/null" state="absent" - -# Schedule a command to execute in 20 minutes making sure it is unique in the queue. -- at: command="ls -d / > /dev/null" unique=true count=20 units="minutes" -''' - -import os -import tempfile - - -def add_job(module, result, at_cmd, count, units, command, script_file): - at_command = "%s now + %s %s -f %s" % (at_cmd, count, units, script_file) - rc, out, err = module.run_command(at_command, check_rc=True) - if command: - os.unlink(script_file) - result['changed'] = True - - -def delete_job(module, result, at_cmd, command, script_file): - for matching_job in get_matching_jobs(module, at_cmd, script_file): - at_command = "%s -d %s" % (at_cmd, matching_job) - rc, out, err = module.run_command(at_command, check_rc=True) - result['changed'] = True - if command: - os.unlink(script_file) - module.exit_json(**result) - - -def get_matching_jobs(module, at_cmd, script_file): - matching_jobs = [] - - atq_cmd = module.get_bin_path('atq', True) - - # Get list of job numbers for the user. - atq_command = "%s" % atq_cmd - rc, out, err = module.run_command(atq_command, check_rc=True) - current_jobs = out.splitlines() - if len(current_jobs) == 0: - return matching_jobs - - # Read script_file into a string. - script_file_string = open(script_file).read().strip() - - # Loop through the jobs. - # If the script text is contained in a job add job number to list. 
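# ('at -c <job>' prints the job's environment followed by the script body, so
# a plain substring test against the script text is enough to find matches.)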
- for current_job in current_jobs: - split_current_job = current_job.split() - at_command = "%s -c %s" % (at_cmd, split_current_job[0]) - rc, out, err = module.run_command(at_command, check_rc=True) - if script_file_string in out: - matching_jobs.append(split_current_job[0]) - - # Return the list. - return matching_jobs - - -def create_tempfile(command): - filed, script_file = tempfile.mkstemp(prefix='at') - fileh = os.fdopen(filed, 'w') - fileh.write(command) - fileh.close() - return script_file - - -def main(): - - module = AnsibleModule( - argument_spec = dict( - command=dict(required=False, - type='str'), - script_file=dict(required=False, - type='str'), - count=dict(required=False, - type='int'), - units=dict(required=False, - default=None, - choices=['minutes', 'hours', 'days', 'weeks'], - type='str'), - state=dict(required=False, - default='present', - choices=['present', 'absent'], - type='str'), - unique=dict(required=False, - default=False, - type='bool') - ), - mutually_exclusive=[['command', 'script_file']], - required_one_of=[['command', 'script_file']], - supports_check_mode=False - ) - - at_cmd = module.get_bin_path('at', True) - - command = module.params['command'] - script_file = module.params['script_file'] - count = module.params['count'] - units = module.params['units'] - state = module.params['state'] - unique = module.params['unique'] - - if (state == 'present') and (not count or not units): - module.fail_json(msg="present state requires count and units") - - result = {'state': state, 'changed': False} - - # If command transform it into a script_file - if command: - script_file = create_tempfile(command) - - # if absent remove existing and return - if state == 'absent': - delete_job(module, result, at_cmd, command, script_file) - - # if unique if existing return unchanged - if unique: - if len(get_matching_jobs(module, at_cmd, script_file)) != 0: - if command: - os.unlink(script_file) - module.exit_json(**result) - - result['script_file'] = script_file - result['count'] = count - result['units'] = units - - add_job(module, result, at_cmd, count, units, command, script_file) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/authorized_key b/library/system/authorized_key deleted file mode 100644 index f964113127e..00000000000 --- a/library/system/authorized_key +++ /dev/null @@ -1,421 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to add authorized_keys for ssh logins. -(c) 2012, Brad Olson - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: authorized_key -short_description: Adds or removes an SSH authorized key -description: - - Adds or removes an SSH authorized key for a user from a remote host. 
-version_added: "0.5" -options: - user: - description: - - The username on the remote host whose authorized_keys file will be modified - required: true - default: null - aliases: [] - key: - description: - - The SSH public key, as a string - required: true - default: null - path: - description: - - Alternate path to the authorized_keys file - required: false - default: "(homedir)+/.ssh/authorized_keys" - version_added: "1.2" - manage_dir: - description: - - Whether this module should manage the directory of the authorized key file. If - set, the module will create the directory, as well as set the owner and permissions - of an existing directory. Be sure to - set C(manage_dir=no) if you are using an alternate directory for - authorized_keys, as set with C(path), since you could lock yourself out of - SSH access. See the example below. - required: false - choices: [ "yes", "no" ] - default: "yes" - version_added: "1.2" - state: - description: - - Whether the given key (with the given key_options) should or should not be in the file - required: false - choices: [ "present", "absent" ] - default: "present" - key_options: - description: - - A string of ssh key options to be prepended to the key in the authorized_keys file - required: false - default: null - version_added: "1.4" -description: - - "Adds or removes authorized keys for particular user accounts" -author: Brad Olson -''' - -EXAMPLES = ''' -# Example using key data from a local file on the management machine -- authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" - -# Using alternate directory locations: -- authorized_key: user=charlie - key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" - path='/etc/ssh/authorized_keys/charlie' - manage_dir=no - -# Using with_file -- name: Set up authorized_keys for the deploy user - authorized_key: user=deploy - key="{{ item }}" - with_file: - - public_keys/doe-jane - - public_keys/doe-john - -# Using key_options: -- authorized_key: user=charlie - key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" - key_options='no-port-forwarding,host="10.0.1.1"' -''' - -# Makes sure the public key line is present or absent in the user's .ssh/authorized_keys. -# -# Arguments -# ========= -# user = username -# key = line to add to authorized_keys for user -# path = path to the user's authorized_keys file (default: ~/.ssh/authorized_keys) -# manage_dir = whether to create, and control ownership of the directory (default: true) -# state = absent|present (default: present) -# -# see example in examples/playbooks - -import sys -import os -import pwd -import os.path -import tempfile -import re -import shlex - -class keydict(dict): - - """ a dictionary that maintains the order of keys as they are added """ - - # http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class - - def __init__(self, *args, **kw): - super(keydict,self).__init__(*args, **kw) - self.itemlist = super(keydict,self).keys() - def __setitem__(self, key, value): - self.itemlist.append(key) - super(keydict,self).__setitem__(key, value) - def __iter__(self): - return iter(self.itemlist) - def keys(self): - return self.itemlist - def values(self): - return [self[key] for key in self] - def itervalues(self): - return (self[key] for key in self) - -def keyfile(module, user, write=False, path=None, manage_dir=True): - """ - Calculate name of authorized keys file, optionally creating the - directories and file, properly setting permissions. 
- - :param str user: name of user in passwd file - :param bool write: if True, write changes to authorized_keys file (creating directories if needed) - :param str path: if not None, use provided path rather than default of '~user/.ssh/authorized_keys' - :param bool manage_dir: if True, create and set ownership of the parent dir of the authorized_keys file - :return: full path string to authorized_keys for user - """ - - try: - user_entry = pwd.getpwnam(user) - except KeyError, e: - module.fail_json(msg="Failed to lookup user %s: %s" % (user, str(e))) - if path is None: - homedir = user_entry.pw_dir - sshdir = os.path.join(homedir, ".ssh") - keysfile = os.path.join(sshdir, "authorized_keys") - else: - sshdir = os.path.dirname(path) - keysfile = path - - if not write: - return keysfile - - uid = user_entry.pw_uid - gid = user_entry.pw_gid - - if manage_dir: - if not os.path.exists(sshdir): - os.mkdir(sshdir, 0700) - if module.selinux_enabled(): - module.set_default_selinux_context(sshdir, False) - os.chown(sshdir, uid, gid) - os.chmod(sshdir, 0700) - - if not os.path.exists(keysfile): - basedir = os.path.dirname(keysfile) - if not os.path.exists(basedir): - os.makedirs(basedir) - try: - f = open(keysfile, "w") #touches file so we can set ownership and perms - finally: - f.close() - if module.selinux_enabled(): - module.set_default_selinux_context(keysfile, False) - - try: - os.chown(keysfile, uid, gid) - os.chmod(keysfile, 0600) - except OSError: - pass - - return keysfile - -def parseoptions(module, options): - ''' - reads a string containing ssh-key options - and returns a dictionary of those options - ''' - options_dict = keydict() #ordered dict - if options: - try: - # the following regex will split on commas while - # ignoring those commas that fall within quotes - regex = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''') - parts = regex.split(options)[1:-1] - for part in parts: - if "=" in part: - (key, value) = part.split("=", 1) - options_dict[key] = value - elif part != ",": - options_dict[part] = None - except: - module.fail_json(msg="invalid option string: %s" % options) - - return options_dict - -def parsekey(module, raw_key): - ''' - parses a key, which may or may not contain a list - of ssh-key options at the beginning - ''' - - VALID_SSH2_KEY_TYPES = [ - 'ssh-ed25519', - 'ecdsa-sha2-nistp256', - 'ecdsa-sha2-nistp384', - 'ecdsa-sha2-nistp521', - 'ssh-dss', - 'ssh-rsa', - ] - - options = None # connection options - key = None # encrypted key string - key_type = None # type of ssh key - type_index = None # index of keytype in key string|list - - # remove comment yaml escapes - raw_key = raw_key.replace('\#', '#') - - # split key safely - lex = shlex.shlex(raw_key) - lex.quotes = [] - lex.commenters = '' #keep comment hashes - lex.whitespace_split = True - key_parts = list(lex) - - for i in range(0, len(key_parts)): - if key_parts[i] in VALID_SSH2_KEY_TYPES: - type_index = i - key_type = key_parts[i] - break - - # check for options - if type_index is None: - return None - elif type_index > 0: - options = " ".join(key_parts[:type_index]) - - # parse the options (if any) - options = parseoptions(module, options) - - # get key after the type index - key = key_parts[(type_index + 1)] - - # set comment to everything after the key - if len(key_parts) > (type_index + 1): - comment = " ".join(key_parts[(type_index + 2):]) - - return (key, key_type, options, comment) - -def readkeys(module, filename): - - if not os.path.isfile(filename): - return {} - - keys = {} - f = open(filename) - for 
line in f.readlines(): - key_data = parsekey(module, line) - if key_data: - # use key as identifier - keys[key_data[0]] = key_data - else: - # for an invalid line, just append the line - # to the array so it will be re-output later - keys[line] = line - f.close() - return keys - -def writekeys(module, filename, keys): - - fd, tmp_path = tempfile.mkstemp('', 'tmp', os.path.dirname(filename)) - f = open(tmp_path,"w") - try: - for index, key in keys.items(): - try: - (keyhash,type,options,comment) = key - option_str = "" - if options: - option_strings = [] - for option_key in options.keys(): - if options[option_key]: - option_strings.append("%s=%s" % (option_key, options[option_key])) - else: - option_strings.append("%s" % option_key) - - option_str = ",".join(option_strings) - option_str += " " - key_line = "%s%s %s %s\n" % (option_str, type, keyhash, comment) - except: - key_line = key - f.writelines(key_line) - except IOError, e: - module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e))) - f.close() - module.atomic_move(tmp_path, filename) - -def enforce_state(module, params): - """ - Add or remove key. - """ - - user = params["user"] - key = params["key"] - path = params.get("path", None) - manage_dir = params.get("manage_dir", True) - state = params.get("state", "present") - key_options = params.get("key_options", None) - - # extract indivial keys into an array, skipping blank lines and comments - key = [s for s in key.splitlines() if s and not s.startswith('#')] - - - # check current state -- just get the filename, don't create file - do_write = False - params["keyfile"] = keyfile(module, user, do_write, path, manage_dir) - existing_keys = readkeys(module, params["keyfile"]) - - # Check our new keys, if any of them exist we'll continue. - for new_key in key: - parsed_new_key = parsekey(module, new_key) - if key_options is not None: - parsed_options = parseoptions(module, key_options) - parsed_new_key = (parsed_new_key[0], parsed_new_key[1], parsed_options, parsed_new_key[3]) - - if not parsed_new_key: - module.fail_json(msg="invalid key specified: %s" % new_key) - - present = False - matched = False - non_matching_keys = [] - - if parsed_new_key[0] in existing_keys: - present = True - # Then we check if everything matches, including - # the key type and options. 
If not, we append this - # existing key to the non-matching list - # We only want it to match everything when the state - # is present - if parsed_new_key != existing_keys[parsed_new_key[0]] and state == "present": - non_matching_keys.append(existing_keys[parsed_new_key[0]]) - else: - matched = True - - - # handle idempotent state=present - if state=="present": - if len(non_matching_keys) > 0: - for non_matching_key in non_matching_keys: - if non_matching_key[0] in existing_keys: - del existing_keys[non_matching_key[0]] - do_write = True - - if not matched: - existing_keys[parsed_new_key[0]] = parsed_new_key - do_write = True - - elif state=="absent": - if not matched: - continue - del existing_keys[parsed_new_key[0]] - do_write = True - - if do_write: - if module.check_mode: - module.exit_json(changed=True) - writekeys(module, keyfile(module, user, do_write, path, manage_dir), existing_keys) - params['changed'] = True - else: - if module.check_mode: - module.exit_json(changed=False) - - return params - -def main(): - - module = AnsibleModule( - argument_spec = dict( - user = dict(required=True, type='str'), - key = dict(required=True, type='str'), - path = dict(required=False, type='str'), - manage_dir = dict(required=False, type='bool', default=True), - state = dict(default='present', choices=['absent','present']), - key_options = dict(required=False, type='str'), - unique = dict(default=False, type='bool'), - ), - supports_check_mode=True - ) - - results = enforce_state(module, module.params) - module.exit_json(**results) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/capabilities b/library/system/capabilities deleted file mode 100644 index f4a9f62c0d0..00000000000 --- a/library/system/capabilities +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Nate Coraor -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: capabilities -short_description: Manage Linux capabilities -description: - - This module manipulates files privileges using the Linux capabilities(7) system. -version_added: "1.6" -options: - path: - description: - - Specifies the path to the file to be managed. - required: true - default: null - capability: - description: - - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent)) - required: true - default: null - aliases: [ 'cap' ] - state: - description: - - Whether the entry should be present or absent in the file's capabilities. - choices: [ "present", "absent" ] - default: present -notes: - - The capabilities system will automatically transform operators and flags - into the effective set, so (for example, cap_foo=ep will probably become - cap_foo+ep). 
This module does not attempt to determine the final operator - and flags to compare, so you will want to ensure that your capabilities - argument matches the final capabilities. -requirements: [] -author: Nate Coraor -''' - -EXAMPLES = ''' -# Set cap_sys_chroot+ep on /foo -- capabilities: path=/foo capability=cap_sys_chroot+ep state=present - -# Remove cap_net_bind_service from /bar -- capabilities: path=/bar capability=cap_net_bind_service state=absent -''' - - -OPS = ( '=', '-', '+' ) - -# ============================================================== - -import os -import tempfile -import re - -class CapabilitiesModule(object): - - platform = 'Linux' - distribution = None - - def __init__(self, module): - self.module = module - self.path = module.params['path'].strip() - self.capability = module.params['capability'].strip().lower() - self.state = module.params['state'] - self.getcap_cmd = module.get_bin_path('getcap', required=True) - self.setcap_cmd = module.get_bin_path('setcap', required=True) - self.capability_tup = self._parse_cap(self.capability, op_required=self.state=='present') - - self.run() - - def run(self): - - current = self.getcap(self.path) - caps = [ cap[0] for cap in current ] - - if self.state == 'present' and self.capability_tup not in current: - # need to add capability - if self.module.check_mode: - self.module.exit_json(changed=True, msg='capabilities changed') - else: - # remove from current cap list if it's already set (but op/flags differ) - current = filter(lambda x: x[0] != self.capability_tup[0], current) - # add new cap with correct op/flags - current.append( self.capability_tup ) - self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) - elif self.state == 'absent' and self.capability_tup[0] in caps: - # need to remove capability - if self.module.check_mode: - self.module.exit_json(changed=True, msg='capabilities changed') - else: - # remove from current cap list and then set current list - current = filter(lambda x: x[0] != self.capability_tup[0], current) - self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) - self.module.exit_json(changed=False, state=self.state) - - def getcap(self, path): - rval = [] - cmd = "%s -v %s" % (self.getcap_cmd, path) - rc, stdout, stderr = self.module.run_command(cmd) - # If file xattrs are set but no caps are set the output will be: - # '/foo =' - # If file xattrs are unset the output will be: - # '/foo' - # If the file does not eixst the output will be (with rc == 0...): - # '/foo (No such file or directory)' - if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1): - self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr) - if stdout.strip() != path: - caps = stdout.split(' =')[1].strip().split() - for cap in caps: - cap = cap.lower() - # getcap condenses capabilities with the same op/flags into a - # comma-separated list, so we have to parse that - if ',' in cap: - cap_group = cap.split(',') - cap_group[-1], op, flags = self._parse_cap(cap_group[-1]) - for subcap in cap_group: - rval.append( ( subcap, op, flags ) ) - else: - rval.append(self._parse_cap(cap)) - return rval - - def setcap(self, path, caps): - caps = ' '.join([ ''.join(cap) for cap in caps ]) - cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path) - rc, stdout, stderr = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Unable to set 
capabilities of %s" % path, stdout=stdout, stderr=stderr) - else: - return stdout - - def _parse_cap(self, cap, op_required=True): - opind = -1 - try: - i = 0 - while opind == -1: - opind = cap.find(OPS[i]) - i += 1 - except: - if op_required: - self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS)) - else: - return (cap, None, None) - op = cap[opind] - cap, flags = cap.split(op) - return (cap, op, flags) - -# ============================================================== -# main - -def main(): - - # defining module - module = AnsibleModule( - argument_spec = dict( - path = dict(aliases=['key'], required=True), - capability = dict(aliases=['cap'], required=True), - state = dict(default='present', choices=['present', 'absent']), - ), - supports_check_mode=True - ) - - CapabilitiesModule(module) - - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/cron b/library/system/cron deleted file mode 100644 index d14f36253c0..00000000000 --- a/library/system/cron +++ /dev/null @@ -1,524 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2012, Dane Summers -# (c) 2013, Mike Grozak -# (c) 2013, Patrick Callahan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -# Cron Plugin: The goal of this plugin is to provide an indempotent method for -# setting up cron jobs on a host. The script will play well with other manually -# entered crons. Each cron job entered will be preceded with a comment -# describing the job so that it can be found later, which is required to be -# present in order for this plugin to find/modify the job. -# -# This module is based on python-crontab by Martin Owens. -# - -DOCUMENTATION = """ ---- -module: cron -short_description: Manage cron.d and crontab entries. -description: - - Use this module to manage crontab entries. This module allows you to create named - crontab entries, update, or delete them. - - 'The module includes one line with the description of the crontab entry C("#Ansible: ") - corresponding to the "name" passed to the module, which is used by future ansible/module calls - to find/check the state. The "name" parameter should be unique, and changing the "name" value - will result in a new cron task being created (or a different one being removed)' -version_added: "0.9" -options: - name: - description: - - Description of a crontab entry. - default: null - required: true - user: - description: - - The specific user whose crontab should be modified. - required: false - default: root - job: - description: - - The command to execute. Required if state=present. - required: false - default: null - state: - description: - - Whether to ensure the job is present or absent. - required: false - default: present - choices: [ "present", "absent" ] - cron_file: - description: - - If specified, uses this file in cron.d instead of an individual user's crontab. 
- required: false - default: null - backup: - description: - - If set, create a backup of the crontab before it is modified. - The location of the backup is returned in the C(backup) variable by this module. - required: false - default: false - minute: - description: - - Minute when the job should run ( 0-59, *, */2, etc ) - required: false - default: "*" - hour: - description: - - Hour when the job should run ( 0-23, *, */2, etc ) - required: false - default: "*" - day: - description: - - Day of the month the job should run ( 1-31, *, */2, etc ) - required: false - default: "*" - aliases: [ "dom" ] - month: - description: - - Month of the year the job should run ( 1-12, *, */2, etc ) - required: false - default: "*" - weekday: - description: - - Day of the week that the job should run ( 0-6 for Sunday-Saturday, *, etc ) - required: false - default: "*" - aliases: [ "dow" ] - reboot: - description: - - If the job should be run at reboot. This option is deprecated. Users should use special_time. - version_added: "1.0" - required: false - default: "no" - choices: [ "yes", "no" ] - special_time: - description: - - Special time specification nickname. - version_added: "1.3" - required: false - default: null - choices: [ "reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly" ] -requirements: - - cron -author: Dane Summers -updates: [ 'Mike Grozak', 'Patrick Callahan' ] -""" - -EXAMPLES = ''' -# Ensure a job that runs at 2 and 5 exists. -# Creates an entry like "* 5,2 * * * ls -alh > /dev/null" -- cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null" - -# Ensure an old job is no longer present. Removes any job that is prefixed -# by "#Ansible: an old job" from the crontab -- cron: name="an old job" state=absent - -# Creates an entry like "@reboot /some/job.sh" -- cron: name="a job for reboot" special_time=reboot job="/some/job.sh" - -# Creates a cron file under /etc/cron.d -- cron: name="yum autoupdate" weekday="2" minute=0 hour=12 - user="root" job="YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate" - cron_file=ansible_yum-autoupdate - -# Removes a cron file from under /etc/cron.d -- cron: cron_file=ansible_yum-autoupdate state=absent -''' - -import os -import re -import tempfile -import platform -import pipes - -CRONCMD = "/usr/bin/crontab" - -class CronTabError(Exception): - pass - -class CronTab(object): - """ - CronTab object to write time based crontab file - - user - the user of the crontab (defaults to root) - cron_file - a cron file under /etc/cron.d - """ - def __init__(self, module, user=None, cron_file=None): - self.module = module - self.user = user - self.root = (os.getuid() == 0) - self.lines = None - self.ansible = "#Ansible: " - - # select whether we dump additional debug info through syslog - self.syslogging = False - - if cron_file: - self.cron_file = '/etc/cron.d/%s' % cron_file - else: - self.cron_file = None - - self.read() - - def read(self): - # Read in the crontab from the system - self.lines = [] - if self.cron_file: - # read the cronfile - try: - f = open(self.cron_file, 'r') - self.lines = f.read().splitlines() - f.close() - except IOError, e: - # cron file does not exist - return - except: - raise CronTabError("Unexpected error:", sys.exc_info()[0]) - else: - # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME - (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True) - - if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
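# For illustration, a non-shell variant of the read above (the FIXME case) --
# a sketch only; the module itself still builds a quoted shell line in
# _read_user_execute() because su-based platforms such as SunOS need it:
#
#   import subprocess
#
#   def read_user_crontab(user=None):
#       cmd = [CRONCMD, '-l'] + (['-u', user] if user else [])
#       proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#       out, _ = proc.communicate()
#       if proc.returncode not in (0, 1):   # rc 1 == "no crontab for <user>"
#           raise CronTabError("Unable to read crontab")
#       return out.splitlines()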
- raise CronTabError("Unable to read crontab") - - lines = out.splitlines() - count = 0 - for l in lines: - if count > 2 or (not re.match( r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and - not re.match( r'# \(/tmp/.*installed on.*\)', l) and - not re.match( r'# \(.*version.*\)', l)): - self.lines.append(l) - count += 1 - - def log_message(self, message): - if self.syslogging: - syslog.syslog(syslog.LOG_NOTICE, 'ansible: "%s"' % message) - - def is_empty(self): - if len(self.lines) == 0: - return True - else: - return False - - def write(self, backup_file=None): - """ - Write the crontab to the system. Saves all information. - """ - if backup_file: - fileh = open(backup_file, 'w') - elif self.cron_file: - fileh = open(self.cron_file, 'w') - else: - filed, path = tempfile.mkstemp(prefix='crontab') - fileh = os.fdopen(filed, 'w') - - fileh.write(self.render()) - fileh.close() - - # return if making a backup - if backup_file: - return - - # Add the entire crontab back to the user crontab - if not self.cron_file: - # quoting shell args for now but really this should be two non-shell calls. FIXME - (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True) - os.unlink(path) - - if rc != 0: - self.module.fail_json(msg=err) - - def add_job(self, name, job): - # Add the comment - self.lines.append("%s%s" % (self.ansible, name)) - - # Add the job - self.lines.append("%s" % (job)) - - def update_job(self, name, job): - return self._update_job(name, job, self.do_add_job) - - def do_add_job(self, lines, comment, job): - lines.append(comment) - - lines.append("%s" % (job)) - - def remove_job(self, name): - return self._update_job(name, "", self.do_remove_job) - - def do_remove_job(self, lines, comment, job): - return None - - def remove_job_file(self): - try: - os.unlink(self.cron_file) - return True - except OSError, e: - # cron file does not exist - return False - except: - raise CronTabError("Unexpected error:", sys.exc_info()[0]) - - def find_job(self, name): - comment = None - for l in self.lines: - if comment is not None: - if comment == name: - return [comment, l] - else: - comment = None - elif re.match( r'%s' % self.ansible, l): - comment = re.sub( r'%s' % self.ansible, '', l) - - return [] - - def get_cron_job(self,minute,hour,day,month,weekday,job,special): - if special: - if self.cron_file: - return "@%s %s %s" % (special, self.user, job) - else: - return "@%s %s" % (special, job) - else: - if self.cron_file: - return "%s %s %s %s %s %s %s" % (minute,hour,day,month,weekday,self.user,job) - else: - return "%s %s %s %s %s %s" % (minute,hour,day,month,weekday,job) - - return None - - def get_jobnames(self): - jobnames = [] - - for l in self.lines: - if re.match( r'%s' % self.ansible, l): - jobnames.append(re.sub( r'%s' % self.ansible, '', l)) - - return jobnames - - def _update_job(self, name, job, addlinesfunction): - ansiblename = "%s%s" % (self.ansible, name) - newlines = [] - comment = None - - for l in self.lines: - if comment is not None: - addlinesfunction(newlines, comment, job) - comment = None - elif l == ansiblename: - comment = l - else: - newlines.append(l) - - self.lines = newlines - - if len(newlines) == 0: - return True - else: - return False # TODO add some more error testing - - def render(self): - """ - Render this crontab as it would be in the crontab. 
- """ - crons = [] - for cron in self.lines: - crons.append(cron) - - result = '\n'.join(crons) - if result and result[-1] not in ['\n', '\r']: - result += '\n' - return result - - def _read_user_execute(self): - """ - Returns the command line for reading a crontab - """ - user = '' - if self.user: - if platform.system() == 'SunOS': - return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD)) - elif platform.system() == 'AIX': - return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user)) - elif platform.system() == 'HP-UX': - return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user)) - else: - user = '-u %s' % pipes.quote(self.user) - return "%s %s %s" % (CRONCMD , user, '-l') - - def _write_execute(self, path): - """ - Return the command line for writing a crontab - """ - user = '' - if self.user: - if platform.system() in ['SunOS', 'HP-UX', 'AIX']: - return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path)) - else: - user = '-u %s' % pipes.quote(self.user) - return "%s %s %s" % (CRONCMD , user, pipes.quote(path)) - - - -#================================================== - -def main(): - # The following example playbooks: - # - # - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null" - # - # - name: do the job - # cron: name="do the job" hour="5,2" job="/some/dir/job.sh" - # - # - name: no job - # cron: name="an old job" state=absent - # - # Would produce: - # # Ansible: check dirs - # * * 5,2 * * ls -alh > /dev/null - # # Ansible: do the job - # * * 5,2 * * /some/dir/job.sh - - module = AnsibleModule( - argument_spec = dict( - name=dict(required=True), - user=dict(required=False), - job=dict(required=False), - cron_file=dict(required=False), - state=dict(default='present', choices=['present', 'absent']), - backup=dict(default=False, type='bool'), - minute=dict(default='*'), - hour=dict(default='*'), - day=dict(aliases=['dom'], default='*'), - month=dict(default='*'), - weekday=dict(aliases=['dow'], default='*'), - reboot=dict(required=False, default=False, type='bool'), - special_time=dict(required=False, - default=None, - choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"], - type='str') - ), - supports_check_mode = False, - ) - - name = module.params['name'] - user = module.params['user'] - job = module.params['job'] - cron_file = module.params['cron_file'] - state = module.params['state'] - backup = module.params['backup'] - minute = module.params['minute'] - hour = module.params['hour'] - day = module.params['day'] - month = module.params['month'] - weekday = module.params['weekday'] - reboot = module.params['reboot'] - special_time = module.params['special_time'] - do_install = state == 'present' - - changed = False - res_args = dict() - - # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option. - os.umask(022) - crontab = CronTab(module, user, cron_file) - - if crontab.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'cron instantiated - name: "%s"' % name) - - # --- user input validation --- - - if (special_time or reboot) and \ - (True in [(x != '*') for x in [minute, hour, day, month, weekday]]): - module.fail_json(msg="You must specify time and date fields or special time.") - - if cron_file and do_install: - if not user: - module.fail_json(msg="To use cron_file=... parameter you must specify user=... 
as well") - - if reboot and special_time: - module.fail_json(msg="reboot and special_time are mutually exclusive") - - if name is None and do_install: - module.fail_json(msg="You must specify 'name' to install a new cron job") - - if job is None and do_install: - module.fail_json(msg="You must specify 'job' to install a new cron job") - - if job and name is None and not do_install: - module.fail_json(msg="You must specify 'name' to remove a cron job") - - if reboot: - if special_time: - module.fail_json(msg="reboot and special_time are mutually exclusive") - else: - special_time = "reboot" - - # if requested make a backup before making a change - if backup: - (backuph, backup_file) = tempfile.mkstemp(prefix='crontab') - crontab.write(backup_file) - - if crontab.cron_file and not name and not do_install: - changed = crontab.remove_job_file() - module.exit_json(changed=changed,cron_file=cron_file,state=state) - - job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time) - old_job = crontab.find_job(name) - - if do_install: - if len(old_job) == 0: - crontab.add_job(name, job) - changed = True - if len(old_job) > 0 and old_job[1] != job: - crontab.update_job(name, job) - changed = True - else: - if len(old_job) > 0: - crontab.remove_job(name) - changed = True - - res_args = dict( - jobs = crontab.get_jobnames(), changed = changed - ) - - if changed: - crontab.write() - - # retain the backup only if crontab or cron file have changed - if backup: - if changed: - res_args['backup_file'] = backup_file - else: - os.unlink(backup_file) - - if cron_file: - res_args['cron_file'] = cron_file - - module.exit_json(**res_args) - - # --- should never get here - module.exit_json(msg="Unable to execute cron task.") - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/system/debconf b/library/system/debconf deleted file mode 100644 index 7f5ea0368ca..00000000000 --- a/library/system/debconf +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to configure .deb packages. -(c) 2014, Brian Coca - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: debconf -short_description: Configure a .deb package -description: - - Configure a .deb package using debconf-set-selections. Or just query - existing selections. -version_added: "1.6" -notes: - - This module requires the command line debconf tools. - - A number of questions have to be answered (depending on the package). - Use 'debconf-show ' on any Debian or derivative with the package - installed to see questions/settings available. -requirements: [ debconf, debconf-utils ] -options: - name: - description: - - Name of package to configure. 
- required: true - default: null - aliases: ['pkg'] - question: - description: - - A debconf configuration setting - required: false - default: null - aliases: ['setting', 'selection'] - vtype: - description: - - The type of the value supplied - required: false - default: null - choices: [string, password, boolean, select, multiselect, note, error, title, text] - aliases: [] - value: - description: - - Value to set the configuration to - required: false - default: null - aliases: ['answer'] - unseen: - description: - - Do not set 'seen' flag when pre-seeding - required: false - default: False - aliases: [] -author: Brian Coca - -''' - -EXAMPLES = ''' -# Set default locale to fr_FR.UTF-8 -debconf: name=locales question='locales/default_environment_locale' value=fr_FR.UTF-8 vtype='select' - -# set to generate locales: -debconf: name=locales question='locales/locales_to_be_generated' value='en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8' vtype='multiselect' - -# Accept oracle license -debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select' - -# Specifying package you can register/return the list of questions and current values -debconf: name='tzdata' -''' - -import pipes - -def get_selections(module, pkg): - cmd = [module.get_bin_path('debconf-show', True), pkg] - rc, out, err = module.run_command(' '.join(cmd)) - - if rc != 0: - module.fail_json(msg=err) - - selections = {} - - for line in out.splitlines(): - (key, value) = line.split(':', 1) - selections[ key.strip('*').strip() ] = value.strip() - - return selections - - -def set_selection(module, pkg, question, vtype, value, unseen): - - data = ' '.join([ question, vtype, value ]) - - setsel = module.get_bin_path('debconf-set-selections', True) - cmd = ["echo %s %s |" % (pipes.quote(pkg), pipes.quote(data)), setsel] - if unseen: - cmd.append('-u') - - return module.run_command(' '.join(cmd), use_unsafe_shell=True) - -def main(): - - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True, aliases=['pkg'], type='str'), - question = dict(required=False, aliases=['setting', 'selection'], type='str'), - vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text']), - value= dict(required=False, type='str'), - unseen = dict(required=False, type='bool'), - ), - required_together = ( ['question','vtype', 'value'],), - supports_check_mode=True, - ) - - #TODO: enable passing array of options and/or debconf file from get-selections dump - pkg = module.params["name"] - question = module.params["question"] - vtype = module.params["vtype"] - value = module.params["value"] - unseen = module.params["unseen"] - - prev = get_selections(module, pkg) - diff = '' - - changed = False - msg = "" - - if question is not None: - if vtype is None or value is None: - module.fail_json(msg="when supplying a question you must supply a valid vtype and value") - - if not question in prev or prev[question] != value: - changed = True - - if changed: - if not module.check_mode: - rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen) - if rc: - module.fail_json(msg=e) - - curr = { question: value } - if question in prev: - prev = {question: prev[question]} - else: - prev[question] = '' - - module.exit_json(changed=changed, msg=msg, current=curr, previous=prev) - - module.exit_json(changed=changed, msg=msg, current=prev) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git 
a/library/system/facter b/library/system/facter deleted file mode 100644 index a72cdc6536f..00000000000 --- a/library/system/facter +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. -# - - -DOCUMENTATION = ''' --- -module: facter -short_description: Runs the discovery program I(facter) on the remote system -description: - - Runs the I(facter) discovery program - (U(https://github.com/puppetlabs/facter)) on the remote system, returning - JSON data that can be useful for inventory purposes. -version_added: "0.2" -options: {} -notes: [] -requirements: [ "facter", "ruby-json" ] -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Example command-line invocation -ansible www.example.net -m facter -''' - -def main(): - module = AnsibleModule( - argument_spec = dict() - ) - - cmd = ["/usr/bin/env", "facter", "--json"] - rc, out, err = module.run_command(cmd, check_rc=True) - module.exit_json(**json.loads(out)) - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/system/filesystem b/library/system/filesystem deleted file mode 100644 index 064c0d0af86..00000000000 --- a/library/system/filesystem +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Alexander Bulimov -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' --- -author: Alexander Bulimov -module: filesystem -short_description: Makes a file system on a block device -description: - - This module creates a file system on a block device. -version_added: "1.2" -options: - fstype: - description: - - File System type to be created. - required: true - dev: - description: - - Target block device. - required: true - force: - choices: [ "yes", "no" ] - default: "no" - description: - - If yes, allows creating a new filesystem on a device that already has one. - required: false - opts: - description: - - List of options to be passed to mkfs command. -notes: - - uses mkfs command -''' - -EXAMPLES = ''' -# Create an ext2 filesystem on /dev/sdb1. -- filesystem: fstype=ext2 dev=/dev/sdb1 - -# Create an ext4 filesystem on /dev/sdb1 and check disk blocks.
-- filesystem: fstype=ext4 dev=/dev/sdb1 opts="-cc" -''' - -def main(): - module = AnsibleModule( - argument_spec = dict( - fstype=dict(required=True, aliases=['type']), - dev=dict(required=True, aliases=['device']), - opts=dict(), - force=dict(type='bool', default='no'), - ), - supports_check_mode=True, - ) - - dev = module.params['dev'] - fstype = module.params['fstype'] - opts = module.params['opts'] - force = module.boolean(module.params['force']) - - changed = False - - if not os.path.exists(dev): - module.fail_json(msg="Device %s not found."%dev) - - cmd = module.get_bin_path('blkid', required=True) - - rc,raw_fs,err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev)) - fs = raw_fs.strip() - - - if fs == fstype: - module.exit_json(changed=False) - elif fs and not force: - module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite"%(dev,fs), rc=rc, err=err) - - ### create fs - - if module.check_mode: - changed = True - else: - mkfs = module.get_bin_path('mkfs', required=True) - cmd = None - if fstype in ['ext2', 'ext3', 'ext4', 'ext4dev']: - force_flag="-F" - elif fstype in ['btrfs']: - force_flag="-f" - else: - force_flag="" - - if opts is None: - cmd = "%s -t %s %s '%s'" % (mkfs, fstype, force_flag, dev) - else: - cmd = "%s -t %s %s %s '%s'" % (mkfs, fstype, force_flag, opts, dev) - rc,_,err = module.run_command(cmd) - if rc == 0: - changed = True - else: - module.fail_json(msg="Creating filesystem %s on device '%s' failed"%(fstype,dev), rc=rc, err=err) - - module.exit_json(changed=changed) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/firewalld b/library/system/firewalld deleted file mode 100644 index 22db165aad3..00000000000 --- a/library/system/firewalld +++ /dev/null @@ -1,398 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Adam Miller (maxamillion@fedoraproject.org) -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: firewalld -short_description: Manage arbitrary ports/services with firewalld -description: - - This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules -version_added: "1.4" -options: - service: - description: - - "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services" - required: false - default: null - port: - description: - - "Name of a port to add/remove to/from firewalld must be in the form PORT/PROTOCOL" - required: false - default: null - rich_rule: - description: - - "Rich rule to add/remove to/from firewalld" - required: false - default: null - zone: - description: - - 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).' 
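When no zone is passed, the module falls back to the daemon's configured default zone at runtime, through the same python bindings it uses everywhere else. Roughly (a sketch only; it assumes the firewalld python bindings are installed and the daemon is running):

from firewall.client import FirewallClient

fw = FirewallClient()
if fw.connected:
    # typically 'public' on a stock install, but configurable per system
    print(fw.getDefaultZone())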
- required: false - default: system-default(public) - choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block"] - permanent: - description: - - "Should this configuration be in the running firewalld configuration or persist across reboots" - required: true - default: true - state: - description: - - "Should this port accept(enabled) or reject(disabled) connections" - required: true - default: enabled - timeout: - description: - - "The amount of time the rule should be in effect for when non-permanent" - required: false - default: 0 -notes: - - Not tested on any debian based system -requirements: [ firewalld >= 0.2.11 ] -author: Adam Miller -''' - -EXAMPLES = ''' -- firewalld: service=https permanent=true state=enabled -- firewalld: port=8081/tcp permanent=true state=disabled -- firewalld: zone=dmz service=http permanent=true state=enabled -- firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled -''' - -import os -import re -import sys - -try: - import firewall.config - FW_VERSION = firewall.config.VERSION - - from firewall.client import FirewallClient - fw = FirewallClient() - if not fw.connected: - raise Exception('failed to connect to the firewalld daemon') -except ImportError: - print "failed=True msg='firewalld required for this module'" - sys.exit(1) -except Exception, e: - print "failed=True msg='%s'" % str(e) - sys.exit(1) - -################ -# port handling -# -def get_port_enabled(zone, port_proto): - if port_proto in fw.getPorts(zone): - return True - else: - return False - -def set_port_enabled(zone, port, protocol, timeout): - fw.addPort(zone, port, protocol, timeout) - -def set_port_disabled(zone, port, protocol): - fw.removePort(zone, port, protocol) - -def get_port_enabled_permanent(zone, port_proto): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - if tuple(port_proto) in fw_settings.getPorts(): - return True - else: - return False - -def set_port_enabled_permanent(zone, port, protocol): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - fw_settings.addPort(port, protocol) - fw_zone.update(fw_settings) - -def set_port_disabled_permanent(zone, port, protocol): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - fw_settings.removePort(port, protocol) - fw_zone.update(fw_settings) - - -#################### -# service handling -# -def get_service_enabled(zone, service): - if service in fw.getServices(zone): - return True - else: - return False - -def set_service_enabled(zone, service, timeout): - fw.addService(zone, service, timeout) - -def set_service_disabled(zone, service): - fw.removeService(zone, service) - -def get_service_enabled_permanent(zone, service): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - if service in fw_settings.getServices(): - return True - else: - return False - -def set_service_enabled_permanent(zone, service): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - fw_settings.addService(service) - fw_zone.update(fw_settings) - -def set_service_disabled_permanent(zone, service): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - fw_settings.removeService(service) - fw_zone.update(fw_settings) - - -#################### -# rich rule handling -# -def get_rich_rule_enabled(zone, rule): - if rule in fw.getRichRules(zone): - return True - else: - return False - -def 
set_rich_rule_enabled(zone, rule, timeout): - fw.addRichRule(zone, rule, timeout) - -def set_rich_rule_disabled(zone, rule): - fw.removeRichRule(zone, rule) - -def get_rich_rule_enabled_permanent(zone, rule): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - if rule in fw_settings.getRichRules(): - return True - else: - return False - -def set_rich_rule_enabled_permanent(zone, rule): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - fw_settings.addRichRule(rule) - fw_zone.update(fw_settings) - -def set_rich_rule_disabled_permanent(zone, rule): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - fw_settings.removeRichRule(rule) - fw_zone.update(fw_settings) - - -def main(): - - module = AnsibleModule( - argument_spec = dict( - service=dict(required=False,default=None), - port=dict(required=False,default=None), - rich_rule=dict(required=False,default=None), - zone=dict(required=False,default=None), - permanent=dict(type='bool',required=True), - state=dict(choices=['enabled', 'disabled'], required=True), - timeout=dict(type='int',required=False,default=0), - ), - supports_check_mode=True - ) - - ## Pre-run version checking - if FW_VERSION < "0.2.11": - module.fail_json(msg='unsupported version of firewalld, requires >= 0.2.11') - - ## Global Vars - changed=False - msgs = [] - service = module.params['service'] - rich_rule = module.params['rich_rule'] - - if module.params['port'] != None: - port, protocol = module.params['port'].split('/') - if protocol == None: - module.fail_json(msg='improper port format (missing protocol?)') - else: - port = None - - if module.params['zone'] != None: - zone = module.params['zone'] - else: - zone = fw.getDefaultZone() - - permanent = module.params['permanent'] - desired_state = module.params['state'] - timeout = module.params['timeout'] - - ## Check for firewalld running - try: - if fw.connected == False: - module.fail_json(msg='firewalld service must be running') - except AttributeError: - module.fail_json(msg="firewalld connection can't be established,\ - version likely too old.
Requires firewalld >= 0.2.11") - - modification_count = 0 - if service != None: - modification_count += 1 - if port != None: - modification_count += 1 - if rich_rule != None: - modification_count += 1 - - if modification_count > 1: - module.fail_json(msg='can only operate on one of port, service or rich_rule at a time') - - if service != None: - if permanent: - is_enabled = get_service_enabled_permanent(zone, service) - msgs.append('Permanent operation') - - if desired_state == "enabled": - if is_enabled == False: - if module.check_mode: - module.exit_json(changed=True) - - set_service_enabled_permanent(zone, service) - changed=True - elif desired_state == "disabled": - if is_enabled == True: - if module.check_mode: - module.exit_json(changed=True) - - set_service_disabled_permanent(zone, service) - changed=True - else: - is_enabled = get_service_enabled(zone, service) - msgs.append('Non-permanent operation') - - - if desired_state == "enabled": - if is_enabled == False: - if module.check_mode: - module.exit_json(changed=True) - - set_service_enabled(zone, service, timeout) - changed=True - elif desired_state == "disabled": - if is_enabled == True: - if module.check_mode: - module.exit_json(changed=True) - - set_service_disabled(zone, service) - changed=True - - if changed == True: - msgs.append("Changed service %s to %s" % (service, desired_state)) - - if port != None: - if permanent: - is_enabled = get_port_enabled_permanent(zone, [port, protocol]) - msgs.append('Permanent operation') - - if desired_state == "enabled": - if is_enabled == False: - if module.check_mode: - module.exit_json(changed=True) - - set_port_enabled_permanent(zone, port, protocol) - changed=True - elif desired_state == "disabled": - if is_enabled == True: - if module.check_mode: - module.exit_json(changed=True) - - set_port_disabled_permanent(zone, port, protocol) - changed=True - else: - is_enabled = get_port_enabled(zone, [port,protocol]) - msgs.append('Non-permanent operation') - - if desired_state == "enabled": - if is_enabled == False: - if module.check_mode: - module.exit_json(changed=True) - - set_port_enabled(zone, port, protocol, timeout) - changed=True - elif desired_state == "disabled": - if is_enabled == True: - if module.check_mode: - module.exit_json(changed=True) - - set_port_disabled(zone, port, protocol) - changed=True - - if changed == True: - msgs.append("Changed port %s to %s" % ("%s/%s" % (port, protocol), \ - desired_state)) - - if rich_rule != None: - if permanent: - is_enabled = get_rich_rule_enabled_permanent(zone, rich_rule) - msgs.append('Permanent operation') - - if desired_state == "enabled": - if is_enabled == False: - if module.check_mode: - module.exit_json(changed=True) - - set_rich_rule_enabled_permanent(zone, rich_rule) - changed=True - elif desired_state == "disabled": - if is_enabled == True: - if module.check_mode: - module.exit_json(changed=True) - - set_rich_rule_disabled_permanent(zone, rich_rule) - changed=True - else: - is_enabled = get_rich_rule_enabled(zone, rich_rule) - msgs.append('Non-permanent operation') - - if desired_state == "enabled": - if is_enabled == False: - if module.check_mode: - module.exit_json(changed=True) - - set_rich_rule_enabled(zone, rich_rule, timeout) - changed=True - elif desired_state == "disabled": - if is_enabled == True: - if module.check_mode: - module.exit_json(changed=True) - - set_rich_rule_disabled(zone, rich_rule) - changed=True - - if changed == True: - msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state)) - - 
module.exit_json(changed=changed, msg=', '.join(msgs)) - - -################################################# -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/system/getent b/library/system/getent deleted file mode 100644 index 0173618f699..00000000000 --- a/library/system/getent +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Brian Coca -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. -# - - -DOCUMENTATION = ''' --- -module: getent -short_description: a wrapper around the unix getent utility -description: - - Runs getent against one of its various databases and returns information into - the host's facts -version_added: "1.8" -options: - database: - required: True - description: - - the name of a getent database supported by the target system (passwd, group, - hosts, etc). - key: - required: False - default: '' - description: - - key from which to return values from the specified database, otherwise the - full contents are returned. - split: - required: False - default: None - description: - - "character used to split the database values into lists/arrays such as ':' or '\t', otherwise it will try to pick one depending on the database" - fail_key: - required: False - default: True - description: - - If a supplied key is missing this will make the task fail if True - -notes: - - "Not all databases support enumeration, check system documentation for details" -requirements: [ ] -author: Brian Coca -''' - -EXAMPLES = ''' -# get root user info -- getent: database=passwd key=root - register: root_info - -# get all groups -- getent: database=group split=':' - register: groups - -# get all hosts, split by tab -- getent: database=hosts - register: hosts - -# get http service info, no error if missing -- getent: database=services key=http fail_key=False - register: http_info - -# get user password hash (requires sudo/root) -- getent: database=shadow key=www-data split=: - register: pw_hash - -''' - -def main(): - module = AnsibleModule( - argument_spec = dict( - database = dict(required=True), - key = dict(required=False, default=None), - split = dict(required=False, default=None), - fail_key = dict(required=False, default=True), - ), - supports_check_mode = True, - ) - - colon = [ 'passwd', 'shadow', 'group', 'gshadow' ] - - database = module.params['database'] - key = module.params.get('key') - split = module.params.get('split') - fail_key = module.params.get('fail_key') - - getent_bin = module.get_bin_path('getent', True) - - if key is not None: - cmd = [ getent_bin, database, key ] - else: - cmd = [ getent_bin, database ] - - if split is None and database in colon: - split = ':' - - try: - rc, out, err = module.run_command(cmd) - except Exception, e: - module.fail_json(msg=str(e)) - - msg = "Unexpected failure!"
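# For reference: `getent passwd root` prints one colon-separated record,
#   root:x:0:0:root:/root:/bin/bash
# and the rc == 0 branch below folds that into the returned facts as
#   {'getent_passwd': {'root': ['x', '0', '0', 'root', '/root', '/bin/bash']}}
# i.e. the first field becomes the key and the remaining fields the value list.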
- dbtree = 'getent_%s' % database - results = { dbtree: {} } - - if rc == 0: - for line in out.splitlines(): - record = line.split(split) - results[dbtree][record[0]] = record[1:] - - module.exit_json(ansible_facts=results) - - elif rc == 1: - msg = "Missing arguments, or database unknown." - elif rc == 2: - msg = "One or more supplied keys could not be found in the database." - if not fail_key: - results[dbtree][key] = None - module.exit_json(ansible_facts=results, msg=msg) - elif rc == 3: - msg = "Enumeration not supported on this database." - - module.fail_json(msg=msg) - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/system/group b/library/system/group deleted file mode 100644 index 617de7c2857..00000000000 --- a/library/system/group +++ /dev/null @@ -1,403 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Stephen Fromm -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' --- -module: group -author: Stephen Fromm -version_added: "0.0.2" -short_description: Add or remove groups -requirements: [ groupadd, groupdel, groupmod ] -description: - - Manage presence of groups on a host. -options: - name: - required: true - description: - - Name of the group to manage. - gid: - required: false - description: - - Optional I(GID) to set for the group. - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the group should be present or not on the remote host. - system: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - If I(yes), indicates that the group created is a system group. - -''' - -EXAMPLES = ''' -# Example group command from Ansible Playbooks -- group: name=somegroup state=present -''' - -import grp -import syslog -import platform - -class Group(object): - """ - This is a generic Group manipulation class that is subclassed - based on platform. - - A subclass may wish to override the following action methods:- - - group_del() - - group_add() - - group_mod() - - All subclasses MUST define platform and distribution (which may be None).
- """ - - platform = 'Generic' - distribution = None - GROUPFILE = '/etc/group' - - def __new__(cls, *args, **kwargs): - return load_platform_subclass(Group, args, kwargs) - - def __init__(self, module): - self.module = module - self.state = module.params['state'] - self.name = module.params['name'] - self.gid = module.params['gid'] - self.system = module.params['system'] - self.syslogging = False - - def execute_command(self, cmd): - if self.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd)) - - return self.module.run_command(cmd) - - def group_del(self): - cmd = [self.module.get_bin_path('groupdel', True), self.name] - return self.execute_command(cmd) - - def group_add(self, **kwargs): - cmd = [self.module.get_bin_path('groupadd', True)] - for key in kwargs: - if key == 'gid' and kwargs[key] is not None: - cmd.append('-g') - cmd.append(kwargs[key]) - elif key == 'system' and kwargs[key] == True: - cmd.append('-r') - cmd.append(self.name) - return self.execute_command(cmd) - - def group_mod(self, **kwargs): - cmd = [self.module.get_bin_path('groupmod', True)] - info = self.group_info() - for key in kwargs: - if key == 'gid': - if kwargs[key] is not None and info[2] != int(kwargs[key]): - cmd.append('-g') - cmd.append(kwargs[key]) - if len(cmd) == 1: - return (None, '', '') - if self.module.check_mode: - return (0, '', '') - cmd.append(self.name) - return self.execute_command(cmd) - - def group_exists(self): - try: - if grp.getgrnam(self.name): - return True - except KeyError: - return False - - def group_info(self): - if not self.group_exists(): - return False - try: - info = list(grp.getgrnam(self.name)) - except KeyError: - return False - return info - -# =========================================== - -class SunOS(Group): - """ - This is a SunOS Group manipulation class. Solaris doesn't have - the 'system' group concept. - - This overrides the following methods from the generic class:- - - group_add() - """ - - platform = 'SunOS' - distribution = None - GROUPFILE = '/etc/group' - - def group_add(self, **kwargs): - cmd = [self.module.get_bin_path('groupadd', True)] - for key in kwargs: - if key == 'gid' and kwargs[key] is not None: - cmd.append('-g') - cmd.append(kwargs[key]) - cmd.append(self.name) - return self.execute_command(cmd) - - -# =========================================== - -class AIX(Group): - """ - This is a AIX Group manipulation class. 
- - This overrides the following methods from the generic class:- - - group_del() - - group_add() - - group_mod() - """ - - platform = 'AIX' - distribution = None - GROUPFILE = '/etc/group' - - def group_del(self): - cmd = [self.module.get_bin_path('rmgroup', True), self.name] - return self.execute_command(cmd) - - def group_add(self, **kwargs): - cmd = [self.module.get_bin_path('mkgroup', True)] - for key in kwargs: - if key == 'gid' and kwargs[key] is not None: - cmd.append('id='+kwargs[key]) - elif key == 'system' and kwargs[key] == True: - cmd.append('-a') - cmd.append(self.name) - return self.execute_command(cmd) - - def group_mod(self, **kwargs): - cmd = [self.module.get_bin_path('chgroup', True)] - info = self.group_info() - for key in kwargs: - if key == 'gid': - if kwargs[key] is not None and info[2] != int(kwargs[key]): - cmd.append('id='+kwargs[key]) - if len(cmd) == 1: - return (None, '', '') - if self.module.check_mode: - return (0, '', '') - cmd.append(self.name) - return self.execute_command(cmd) - -# =========================================== - -class FreeBsdGroup(Group): - """ - This is a FreeBSD Group manipulation class. - - This overrides the following methods from the generic class:- - - group_del() - - group_add() - - group_mod() - """ - - platform = 'FreeBSD' - distribution = None - GROUPFILE = '/etc/group' - - def group_del(self): - cmd = [self.module.get_bin_path('pw', True), 'groupdel', self.name] - return self.execute_command(cmd) - - def group_add(self, **kwargs): - cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name] - if self.gid is not None: - cmd.append('-g %d' % int(self.gid)) - return self.execute_command(cmd) - - def group_mod(self, **kwargs): - cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name] - info = self.group_info() - cmd_len = len(cmd) - if self.gid is not None and int(self.gid) != info[2]: - cmd.append('-g %d' % int(self.gid)) - # modify the group if cmd will do anything - if cmd_len != len(cmd): - if self.module.check_mode: - return (0, '', '') - return self.execute_command(cmd) - return (None, '', '') - -# =========================================== - -class OpenBsdGroup(Group): - """ - This is a OpenBSD Group manipulation class. - - This overrides the following methods from the generic class:- - - group_del() - - group_add() - - group_mod() - """ - - platform = 'OpenBSD' - distribution = None - GROUPFILE = '/etc/group' - - def group_del(self): - cmd = [self.module.get_bin_path('groupdel', True), self.name] - return self.execute_command(cmd) - - def group_add(self, **kwargs): - cmd = [self.module.get_bin_path('groupadd', True)] - if self.gid is not None: - cmd.append('-g') - cmd.append('%d' % int(self.gid)) - cmd.append(self.name) - return self.execute_command(cmd) - - def group_mod(self, **kwargs): - cmd = [self.module.get_bin_path('groupmod', True)] - info = self.group_info() - cmd_len = len(cmd) - if self.gid is not None and int(self.gid) != info[2]: - cmd.append('-g') - cmd.append('%d' % int(self.gid)) - if len(cmd) == 1: - return (None, '', '') - if self.module.check_mode: - return (0, '', '') - cmd.append(self.name) - return self.execute_command(cmd) - -# =========================================== - -class NetBsdGroup(Group): - """ - This is a NetBSD Group manipulation class. 
- - This overrides the following methods from the generic class:- - - group_del() - - group_add() - - group_mod() - """ - - platform = 'NetBSD' - distribution = None - GROUPFILE = '/etc/group' - - def group_del(self): - cmd = [self.module.get_bin_path('groupdel', True), self.name] - return self.execute_command(cmd) - - def group_add(self, **kwargs): - cmd = [self.module.get_bin_path('groupadd', True)] - if self.gid is not None: - cmd.append('-g') - cmd.append('%d' % int(self.gid)) - cmd.append(self.name) - return self.execute_command(cmd) - - def group_mod(self, **kwargs): - cmd = [self.module.get_bin_path('groupmod', True)] - info = self.group_info() - cmd_len = len(cmd) - if self.gid is not None and int(self.gid) != info[2]: - cmd.append('-g') - cmd.append('%d' % int(self.gid)) - if len(cmd) == 1: - return (None, '', '') - if self.module.check_mode: - return (0, '', '') - cmd.append(self.name) - return self.execute_command(cmd) - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec = dict( - state=dict(default='present', choices=['present', 'absent'], type='str'), - name=dict(required=True, type='str'), - gid=dict(default=None, type='str'), - system=dict(default=False, type='bool'), - ), - supports_check_mode=True - ) - - group = Group(module) - - if group.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - platform %s' % group.platform) - if group.distribution: - syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - distribution %s' % group.distribution) - - rc = None - out = '' - err = '' - result = {} - result['name'] = group.name - result['state'] = group.state - - if group.state == 'absent': - - if group.group_exists(): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = group.group_del() - if rc != 0: - module.fail_json(name=group.name, msg=err) - - elif group.state == 'present': - - if not group.group_exists(): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = group.group_add(gid=group.gid, system=group.system) - else: - (rc, out, err) = group.group_mod(gid=group.gid) - - if rc is not None and rc != 0: - module.fail_json(name=group.name, msg=err) - - if rc is None: - result['changed'] = False - else: - result['changed'] = True - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - if group.group_exists(): - info = group.group_info() - result['system'] = group.system - result['gid'] = info[2] - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/hostname b/library/system/hostname deleted file mode 100755 index 50eaec12ff5..00000000000 --- a/library/system/hostname +++ /dev/null @@ -1,437 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Hiroaki Nakamura -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
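Like Group above, the Hostname class that follows picks a platform-specific subclass at construction time through __new__ and load_platform_subclass. The core of that dispatch, reduced to a sketch (the real helper in ansible.module_utils.basic also matches on distribution, which this simplified version omits):

import platform

class Dispatcher(object):
    platform = 'Generic'

    def __new__(cls, *args, **kwargs):
        # pick the first registered subclass matching this host's platform
        for sub in cls.__subclasses__():
            if sub.platform == platform.system():
                return object.__new__(sub)
        return object.__new__(cls)

class LinuxDispatcher(Dispatcher):
    platform = 'Linux'

print(type(Dispatcher()).__name__)  # 'LinuxDispatcher' on a Linux host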
- -DOCUMENTATION = ''' --- -module: hostname -author: Hiroaki Nakamura -version_added: "1.4" -short_description: Manage hostname -requirements: [ hostname ] -description: - - Set the system's hostname - - Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI. -options: - name: - required: true - description: - - Name of the host -''' - -EXAMPLES = ''' -- hostname: name=web01 -''' - -from distutils.version import LooseVersion - -# import module snippets -from ansible.module_utils.basic import * - - -# wrap get_distribution_version in case it returns a string -def _get_distribution_version(): - distribution_version = get_distribution_version() - - if type(distribution_version) is str: - distribution_version = 0 - elif distribution_version is None: - distribution_version = 0 - - return distribution_version - - -class UnimplementedStrategy(object): - def __init__(self, module): - self.module = module - - def get_current_hostname(self): - self.unimplemented_error() - - def set_current_hostname(self, name): - self.unimplemented_error() - - def get_permanent_hostname(self): - self.unimplemented_error() - - def set_permanent_hostname(self, name): - self.unimplemented_error() - - def unimplemented_error(self): - platform = get_platform() - distribution = get_distribution() - if distribution is not None: - msg_platform = '%s (%s)' % (platform, distribution) - else: - msg_platform = platform - self.module.fail_json( - msg='hostname module cannot be used on platform %s' % msg_platform) - -class Hostname(object): - """ - This is a generic Hostname manipulation class that is subclassed - based on platform. - - A subclass may wish to set a different strategy instance on self.strategy. - - All subclasses MUST define platform and distribution (which may be None). - """ - - platform = 'Generic' - distribution = None - strategy_class = UnimplementedStrategy - - def __new__(cls, *args, **kwargs): - return load_platform_subclass(Hostname, args, kwargs) - - def __init__(self, module): - self.module = module - self.name = module.params['name'] - self.strategy = self.strategy_class(module) - - def get_current_hostname(self): - return self.strategy.get_current_hostname() - - def set_current_hostname(self, name): - self.strategy.set_current_hostname(name) - - def get_permanent_hostname(self): - return self.strategy.get_permanent_hostname() - - def set_permanent_hostname(self, name): - self.strategy.set_permanent_hostname(name) - -class GenericStrategy(object): - """ - This is a generic Hostname manipulation strategy class. - - A subclass may wish to override some or all of these methods.
- - get_current_hostname() - - get_permanent_hostname() - - set_current_hostname(name) - - set_permanent_hostname(name) - """ - def __init__(self, module): - self.module = module - - HOSTNAME_CMD = '/bin/hostname' - - def get_current_hostname(self): - cmd = [self.HOSTNAME_CMD] - rc, out, err = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % - (rc, out, err)) - return out.strip() - - def set_current_hostname(self, name): - cmd = [self.HOSTNAME_CMD, name] - rc, out, err = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % - (rc, out, err)) - - def get_permanent_hostname(self): - return None - - def set_permanent_hostname(self, name): - pass - - -# =========================================== - -class DebianStrategy(GenericStrategy): - """ - This is a Debian family Hostname manipulation strategy class - it edits - the /etc/hostname file. - """ - - HOSTNAME_FILE = '/etc/hostname' - - def get_permanent_hostname(self): - if not os.path.isfile(self.HOSTNAME_FILE): - try: - open(self.HOSTNAME_FILE, "a").write("") - except IOError, err: - self.module.fail_json(msg="failed to write file: %s" % - str(err)) - try: - f = open(self.HOSTNAME_FILE) - try: - return f.read().strip() - finally: - f.close() - except Exception, err: - self.module.fail_json(msg="failed to read hostname: %s" % - str(err)) - - def set_permanent_hostname(self, name): - try: - f = open(self.HOSTNAME_FILE, 'w+') - try: - f.write("%s\n" % name) - finally: - f.close() - except Exception, err: - self.module.fail_json(msg="failed to update hostname: %s" % - str(err)) - - -# =========================================== - -class RedHatStrategy(GenericStrategy): - """ - This is a Redhat Hostname strategy class - it edits the - /etc/sysconfig/network file. - """ - NETWORK_FILE = '/etc/sysconfig/network' - - def get_permanent_hostname(self): - try: - f = open(self.NETWORK_FILE, 'rb') - try: - for line in f.readlines(): - if line.startswith('HOSTNAME'): - k, v = line.split('=') - return v.strip() - finally: - f.close() - except Exception, err: - self.module.fail_json(msg="failed to read hostname: %s" % - str(err)) - - def set_permanent_hostname(self, name): - try: - lines = [] - found = False - f = open(self.NETWORK_FILE, 'rb') - try: - for line in f.readlines(): - if line.startswith('HOSTNAME'): - lines.append("HOSTNAME=%s\n" % name) - found = True - else: - lines.append(line) - finally: - f.close() - if not found: - lines.append("HOSTNAME=%s\n" % name) - f = open(self.NETWORK_FILE, 'w+') - try: - f.writelines(lines) - finally: - f.close() - except Exception, err: - self.module.fail_json(msg="failed to update hostname: %s" % - str(err)) - - -# =========================================== - -class FedoraStrategy(GenericStrategy): - """ - This is a Fedora family Hostname manipulation strategy class - it uses - the hostnamectl command. 
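# The Fedora strategy below drives three distinct hostname scopes, one
# hostnamectl call each (a sketch of the argument lists it passes to
# run_command()):
#   ['hostnamectl', '--transient', 'set-hostname', name]  # kernel hostname, lost at reboot
#   ['hostnamectl', '--static',    'set-hostname', name]  # /etc/hostname, persistent
#   ['hostnamectl', '--pretty',    'set-hostname', name]  # free-form display name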
- """ - - def get_current_hostname(self): - cmd = ['hostname'] - rc, out, err = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % - (rc, out, err)) - return out.strip() - - def set_current_hostname(self, name): - cmd = ['hostnamectl', '--transient', 'set-hostname', name] - rc, out, err = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % - (rc, out, err)) - - def get_permanent_hostname(self): - cmd = 'hostnamectl --static status' - rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True) - if rc != 0: - self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % - (rc, out, err)) - return out.strip() - - def set_permanent_hostname(self, name): - cmd = ['hostnamectl', '--pretty', 'set-hostname', name] - rc, out, err = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % - (rc, out, err)) - cmd = ['hostnamectl', '--static', 'set-hostname', name] - rc, out, err = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % - (rc, out, err)) - - -# =========================================== - -class OpenRCStrategy(GenericStrategy): - """ - This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits - the /etc/conf.d/hostname file. - """ - - HOSTNAME_FILE = '/etc/conf.d/hostname' - - def get_permanent_hostname(self): - try: - with open(self.HOSTNAME_FILE, 'r') as f: - for line in f: - line = line.strip() - if line.startswith('hostname='): - return line[10:].strip('"') - return None - except Exception, err: - self.module.fail_json(msg="failed to read hostname: %s" % - str(err)) - - def set_permanent_hostname(self, name): - try: - with open(self.HOSTNAME_FILE, 'r') as f: - lines = [x.strip() for x in f] - - for i, line in enumerate(lines): - if line.startswith('hostname='): - lines[i] = 'hostname="%s"' % name - break - - with open(self.HOSTNAME_FILE, 'w') as f: - f.write('\n'.join(lines) + '\n') - except Exception, err: - self.module.fail_json(msg="failed to update hostname: %s" % - str(err)) - -# =========================================== - -class FedoraHostname(Hostname): - platform = 'Linux' - distribution = 'Fedora' - strategy_class = FedoraStrategy - -class OpenSUSEHostname(Hostname): - platform = 'Linux' - distribution = 'Opensuse ' - strategy_class = FedoraStrategy - -class ArchHostname(Hostname): - platform = 'Linux' - distribution = 'Arch' - strategy_class = FedoraStrategy - -class RedHat5Hostname(Hostname): - platform = 'Linux' - distribution = 'Redhat' - strategy_class = RedHatStrategy - -class RedHatServerHostname(Hostname): - platform = 'Linux' - distribution = 'Red hat enterprise linux server' - distribution_version = _get_distribution_version() - if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy - else: - strategy_class = RedHatStrategy - -class RedHatWorkstationHostname(Hostname): - platform = 'Linux' - distribution = 'Red hat enterprise linux workstation' - distribution_version = _get_distribution_version() - if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy - else: - strategy_class = RedHatStrategy - -class CentOSHostname(Hostname): - platform = 'Linux' - distribution = 'Centos' - strategy_class = RedHatStrategy - -class CentOSLinuxHostname(Hostname): - platform = 'Linux' - 
distribution = 'Centos linux' - strategy_class = FedoraStrategy - -class ScientificHostname(Hostname): - platform = 'Linux' - distribution = 'Scientific' - strategy_class = RedHatStrategy - -class ScientificLinuxHostname(Hostname): - platform = 'Linux' - distribution = 'Scientific linux' - strategy_class = RedHatStrategy - -class AmazonLinuxHostname(Hostname): - platform = 'Linux' - distribution = 'Amazon' - strategy_class = RedHatStrategy - -class DebianHostname(Hostname): - platform = 'Linux' - distribution = 'Debian' - strategy_class = DebianStrategy - -class UbuntuHostname(Hostname): - platform = 'Linux' - distribution = 'Ubuntu' - strategy_class = DebianStrategy - -class LinaroHostname(Hostname): - platform = 'Linux' - distribution = 'Linaro' - strategy_class = DebianStrategy - -class GentooHostname(Hostname): - platform = 'Linux' - distribution = 'Gentoo base system' - strategy_class = OpenRCStrategy - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec = dict( - name=dict(required=True, type='str') - ) - ) - - hostname = Hostname(module) - - changed = False - name = module.params['name'] - current_name = hostname.get_current_hostname() - if current_name != name: - hostname.set_current_hostname(name) - changed = True - - permanent_name = hostname.get_permanent_hostname() - if permanent_name != name: - hostname.set_permanent_hostname(name) - changed = True - - module.exit_json(changed=changed, name=name) - -main() diff --git a/library/system/kernel_blacklist b/library/system/kernel_blacklist deleted file mode 100644 index 6af08c0788c..00000000000 --- a/library/system/kernel_blacklist +++ /dev/null @@ -1,141 +0,0 @@ -#!/usr/bin/python -# encoding: utf-8 -*- - -# (c) 2013, Matthias Vogelgesang -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import os -import re - - -DOCUMENTATION = ''' ---- -module: kernel_blacklist -author: Matthias Vogelgesang -version_added: 1.4 -short_description: Blacklist kernel modules -description: - - Add or remove kernel modules from blacklist. -options: - name: - required: true - description: - - Name of kernel module to black- or whitelist. - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the module should be present in the blacklist or absent. - blacklist_file: - required: false - description: - - If specified, use this blacklist file instead of - C(/etc/modprobe.d/blacklist-ansible.conf). 
- default: null -requirements: [] -''' - -EXAMPLES = ''' -# Blacklist the nouveau driver module -- kernel_blacklist: name=nouveau state=present -''' - - -class Blacklist(object): - def __init__(self, module, filename): - if not os.path.exists(filename): - open(filename, 'a').close() - - self.filename = filename - self.module = module - - def get_pattern(self): - return '^blacklist\s*' + self.module + '$' - - def readlines(self): - f = open(self.filename, 'r') - lines = f.readlines() - f.close() - return lines - - def module_listed(self): - lines = self.readlines() - pattern = self.get_pattern() - - for line in lines: - stripped = line.strip() - if stripped.startswith('#'): - continue - - if re.match(pattern, stripped): - return True - - return False - - def remove_module(self): - lines = self.readlines() - pattern = self.get_pattern() - - f = open(self.filename, 'w') - - for line in lines: - if not re.match(pattern, line.strip()): - f.write(line) - - f.close() - - def add_module(self): - f = open(self.filename, 'a') - f.write('blacklist %s\n' % self.module) - f.close() - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(required=False, choices=['present', 'absent'], - default='present'), - blacklist_file=dict(required=False, default=None) - ), - supports_check_mode=False, - ) - - args = dict(changed=False, failed=False, - name=module.params['name'], state=module.params['state']) - - filename = '/etc/modprobe.d/blacklist-ansible.conf' - - if module.params['blacklist_file']: - filename = module.params['blacklist_file'] - - blacklist = Blacklist(args['name'], filename) - - if blacklist.module_listed(): - if args['state'] == 'absent': - blacklist.remove_module() - args['changed'] = True - else: - if args['state'] == 'present': - blacklist.add_module() - args['changed'] = True - - module.exit_json(**args) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/locale_gen b/library/system/locale_gen deleted file mode 100644 index 12eab8dbc8f..00000000000 --- a/library/system/locale_gen +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -import os -import os.path -from subprocess import Popen, PIPE, call - -DOCUMENTATION = ''' ---- -module: locale_gen -short_description: Creates or removes locales. -description: - - Manages locales by editing /etc/locale.gen and invoking locale-gen. -version_added: "1.6" -options: - name: - description: - - Name and encoding of the locale, such as "en_GB.UTF-8". - required: true - default: null - aliases: [] - state: - description: - - Whether the locale shall be present. - required: false - choices: ["present", "absent"] - default: "present" -''' - -EXAMPLES = ''' -# Ensure a locale exists. -- locale_gen: name=de_CH.UTF-8 state=present -''' - -# =========================================== -# locale module specific support methods. -# - -def is_present(name): - """Checks if the given locale is currently installed.""" - output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0] - return any(fix_case(name) == fix_case(line) for line in output.splitlines()) - -def fix_case(name): - """locale -a might return the encoding in either lower or upper case.
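- (e.g. "en_GB.utf8" versus "en_GB.UTF-8")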
- Passing through this function makes them uniform for comparisons.""" - return name.replace(".utf8", ".UTF-8") - -def replace_line(existing_line, new_line): - """Replaces lines in /etc/locale.gen""" - with open("/etc/locale.gen", "r") as f: - lines = [line.replace(existing_line, new_line) for line in f] - with open("/etc/locale.gen", "w") as f: - f.write("".join(lines)) - -def apply_change(targetState, name, encoding): - """Create or remove locale. - - Keyword arguments: - targetState -- Desired state, either present or absent. - name -- Name including encoding such as de_CH.UTF-8. - encoding -- Encoding such as UTF-8. - """ - if targetState=="present": - # Create locale. - replace_line("# "+name+" "+encoding, name+" "+encoding) - else: - # Delete locale. - replace_line(name+" "+encoding, "# "+name+" "+encoding) - - localeGenExitValue = call("locale-gen") - if localeGenExitValue!=0: - raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned "+str(localeGenExitValue)) - -def apply_change_ubuntu(targetState, name, encoding): - """Create or remove locale. - - Keyword arguments: - targetState -- Desired state, either present or absent. - name -- Name including encoding such as de_CH.UTF-8. - encoding -- Encoding such as UTF-8. - """ - if targetState=="present": - # Create locale. - # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local - localeGenExitValue = call(["locale-gen", name]) - else: - # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales. - with open("/var/lib/locales/supported.d/local", "r") as f: - content = f.readlines() - with open("/var/lib/locales/supported.d/local", "w") as f: - for line in content: - if line!=(name+" "+encoding+"\n"): - f.write(line) - # Purge locales and regenerate. - # Please provide a patch if you know how to avoid regenerating the locales to keep! - localeGenExitValue = call(["locale-gen", "--purge"]) - - if localeGenExitValue!=0: - raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned "+str(localeGenExitValue)) - -# ============================================================== -# main - -def main(): - - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - state = dict(choices=['present','absent'], required=True), - ), - supports_check_mode=True - ) - - name = module.params['name'] - if not "." in name: - module.fail_json(msg="Locale does not match pattern. Did you specify the encoding?") - state = module.params['state'] - - if not os.path.exists("/etc/locale.gen"): - if os.path.exists("/var/lib/locales/supported.d/local"): - # Ubuntu created its own system to manage locales. - ubuntuMode = True - else: - module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package “locales” installed?") - else: - # We found the common way to manage locales. 
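- # Standard mode: apply_change() toggles commented entries in /etc/locale.gen and reruns locale-gen; Ubuntu mode uses apply_change_ubuntu(), which tracks the enabled locales in /var/lib/locales/supported.d/local instead.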
- ubuntuMode = False - - prev_state = "present" if is_present(name) else "absent" - changed = (prev_state!=state) - - if module.check_mode: - module.exit_json(changed=changed) - else: - encoding = name.split(".")[1] - if changed: - try: - if ubuntuMode==False: - apply_change(state, name, encoding) - else: - apply_change_ubuntu(state, name, encoding) - except EnvironmentError as e: - module.fail_json(msg=e.strerror, exitValue=e.errno) - - module.exit_json(name=name, changed=changed, msg="OK") - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/system/lvg b/library/system/lvg deleted file mode 100644 index b7a86a27208..00000000000 --- a/library/system/lvg +++ /dev/null @@ -1,253 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Alexander Bulimov -# based on lvol module by Jeroen Hoekx -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -author: Alexander Bulimov -module: lvg -short_description: Configure LVM volume groups -description: - - This module creates, removes or resizes volume groups. -version_added: "1.1" -options: - vg: - description: - - The name of the volume group. - required: true - pvs: - description: - - List of comma-separated devices to use as physical devices in this volume group. Required when creating or resizing volume group. - required: false - pesize: - description: - - The size of the physical extent in megabytes. Must be a power of 2. - default: 4 - required: false - vg_options: - description: - - Additional options to pass to C(vgcreate) when creating the volume group. - default: null - required: false - version_added: "1.6" - state: - choices: [ "present", "absent" ] - default: present - description: - - Control if the volume group exists. - required: false - force: - choices: [ "yes", "no" ] - default: "no" - description: - - If yes, allows to remove volume group with logical volumes. - required: false -notes: - - module does not modify PE size for already present volume group -''' - -EXAMPLES = ''' -# Create a volume group on top of /dev/sda1 with physical extent size = 32MB. -- lvg: vg=vg.services pvs=/dev/sda1 pesize=32 - -# Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5. -# If, for example, we already have VG vg.services on top of /dev/sdb1, -# this VG will be extended by /dev/sdc5. Or if vg.services was created on -# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5, -# and then reduce by /dev/sda5. -- lvg: vg=vg.services pvs=/dev/sdb1,/dev/sdc5 - -# Remove a volume group with name vg.services. 
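-# (removing a group that still contains logical volumes additionally requires force=yes)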
-- lvg: vg=vg.services state=absent -''' - -def parse_vgs(data): - vgs = [] - for line in data.splitlines(): - parts = line.strip().split(';') - vgs.append({ - 'name': parts[0], - 'pv_count': int(parts[1]), - 'lv_count': int(parts[2]), - }) - return vgs - -def find_mapper_device_name(module, dm_device): - dmsetup_cmd = module.get_bin_path('dmsetup', True) - mapper_prefix = '/dev/mapper/' - rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device)) - if rc != 0: - module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err) - mapper_device = mapper_prefix + dm_name.rstrip() - return mapper_device - -def parse_pvs(module, data): - pvs = [] - dm_prefix = '/dev/dm-' - for line in data.splitlines(): - parts = line.strip().split(';') - if parts[0].startswith(dm_prefix): - parts[0] = find_mapper_device_name(module, parts[0]) - pvs.append({ - 'name': parts[0], - 'vg_name': parts[1], - }) - return pvs - -def main(): - module = AnsibleModule( - argument_spec = dict( - vg=dict(required=True), - pvs=dict(type='list'), - pesize=dict(type='int', default=4), - vg_options=dict(default=''), - state=dict(choices=["absent", "present"], default='present'), - force=dict(type='bool', default='no'), - ), - supports_check_mode=True, - ) - - vg = module.params['vg'] - state = module.params['state'] - force = module.boolean(module.params['force']) - pesize = module.params['pesize'] - vgoptions = module.params['vg_options'].split() - - if module.params['pvs']: - dev_string = ' '.join(module.params['pvs']) - dev_list = module.params['pvs'] - elif state == 'present': - module.fail_json(msg="No physical volumes given.") - - - - if state=='present': - ### check given devices - for test_dev in dev_list: - if not os.path.exists(test_dev): - module.fail_json(msg="Device %s not found."%test_dev) - - ### get pv list - pvs_cmd = module.get_bin_path('pvs', True) - rc,current_pvs,err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';'" % pvs_cmd) - if rc != 0: - module.fail_json(msg="Failed executing pvs command.",rc=rc, err=err) - - ### check pv for devices - pvs = parse_pvs(module, current_pvs) - used_pvs = [ pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg ] - if used_pvs: - module.fail_json(msg="Device %s is already in %s volume group."%(used_pvs[0]['name'],used_pvs[0]['vg_name'])) - - vgs_cmd = module.get_bin_path('vgs', True) - rc,current_vgs,err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd) - - if rc != 0: - module.fail_json(msg="Failed executing vgs command.",rc=rc, err=err) - - changed = False - - vgs = parse_vgs(current_vgs) - - for test_vg in vgs: - if test_vg['name'] == vg: - this_vg = test_vg - break - else: - this_vg = None - - if this_vg is None: - if state == 'present': - ### create VG - if module.check_mode: - changed = True - else: - ### create PV - pvcreate_cmd = module.get_bin_path('pvcreate', True) - for current_dev in dev_list: - rc,_,err = module.run_command("%s %s" % (pvcreate_cmd,current_dev)) - if rc == 0: - changed = True - else: - module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) - vgcreate_cmd = module.get_bin_path('vgcreate') - rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg, dev_string]) - if rc == 0: - changed = True - else: - module.fail_json(msg="Creating volume group '%s' failed"%vg, rc=rc, err=err) - else: - if state == 'absent': - if module.check_mode: - 
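- # check mode: the group exists and would be removed, so only report the change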
module.exit_json(changed=True) - else: - if this_vg['lv_count'] == 0 or force: - ### remove VG - vgremove_cmd = module.get_bin_path('vgremove', True) - rc,_,err = module.run_command("%s --force %s" % (vgremove_cmd, vg)) - if rc == 0: - module.exit_json(changed=True) - else: - module.fail_json(msg="Failed to remove volume group %s"%(vg),rc=rc, err=err) - else: - module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes"%(vg)) - - ### resize VG - current_devs = [ pv['name'] for pv in pvs if pv['vg_name'] == vg ] - devs_to_remove = list(set(current_devs) - set(dev_list)) - devs_to_add = list(set(dev_list) - set(current_devs)) - - if devs_to_add or devs_to_remove: - if module.check_mode: - changed = True - else: - if devs_to_add: - devs_to_add_string = ' '.join(devs_to_add) - ### create PV - pvcreate_cmd = module.get_bin_path('pvcreate', True) - for current_dev in devs_to_add: - rc,_,err = module.run_command("%s %s" % (pvcreate_cmd, current_dev)) - if rc == 0: - changed = True - else: - module.fail_json(msg="Creating physical volume '%s' failed"%current_dev, rc=rc, err=err) - ### add PV to our VG - vgextend_cmd = module.get_bin_path('vgextend', True) - rc,_,err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string)) - if rc == 0: - changed = True - else: - module.fail_json(msg="Unable to extend %s by %s."%(vg, devs_to_add_string),rc=rc,err=err) - - ### remove some PV from our VG - if devs_to_remove: - devs_to_remove_string = ' '.join(devs_to_remove) - vgreduce_cmd = module.get_bin_path('vgreduce', True) - rc,_,err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string)) - if rc == 0: - changed = True - else: - module.fail_json(msg="Unable to reduce %s by %s."%(vg, devs_to_remove_string),rc=rc,err=err) - - module.exit_json(changed=changed) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/lvol b/library/system/lvol deleted file mode 100644 index 96f1b846e27..00000000000 --- a/library/system/lvol +++ /dev/null @@ -1,235 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Jeroen Hoekx , Alexander Bulimov -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -author: Jeroen Hoekx -module: lvol -short_description: Configure LVM logical volumes -description: - - This module creates, removes or resizes logical volumes. -version_added: "1.1" -options: - vg: - description: - - The volume group this logical volume is part of. - required: true - lv: - description: - - The name of the logical volume. - required: true - size: - description: - - The size of the logical volume, according to lvcreate(8) --size, by - default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or - according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE]; - resizing is not supported with percentages. 
- state: - choices: [ "present", "absent" ] - default: present - description: - - Control if the logical volume exists. - required: false - force: - version_added: "1.5" - choices: [ "yes", "no" ] - default: "no" - description: - - Shrink or remove operations of volumes requires this switch. Ensures that - that filesystems get never corrupted/destroyed by mistake. - required: false -notes: - - Filesystems on top of the volume are not resized. -''' - -EXAMPLES = ''' -# Create a logical volume of 512m. -- lvol: vg=firefly lv=test size=512 - -# Create a logical volume of 512g. -- lvol: vg=firefly lv=test size=512g - -# Create a logical volume the size of all remaining space in the volume group -- lvol: vg=firefly lv=test size=100%FREE - -# Extend the logical volume to 1024m. -- lvol: vg=firefly lv=test size=1024 - -# Reduce the logical volume to 512m -- lvol: vg=firefly lv=test size=512 force=yes - -# Remove the logical volume. -- lvol: vg=firefly lv=test state=absent force=yes -''' - -import re - -decimal_point = re.compile(r"(\.|,)") - - -def parse_lvs(data): - lvs = [] - for line in data.splitlines(): - parts = line.strip().split(';') - lvs.append({ - 'name': parts[0], - 'size': int(decimal_point.split(parts[1])[0]), - }) - return lvs - - -def main(): - module = AnsibleModule( - argument_spec=dict( - vg=dict(required=True), - lv=dict(required=True), - size=dict(), - state=dict(choices=["absent", "present"], default='present'), - force=dict(type='bool', default='no'), - ), - supports_check_mode=True, - ) - - vg = module.params['vg'] - lv = module.params['lv'] - size = module.params['size'] - state = module.params['state'] - force = module.boolean(module.params['force']) - size_opt = 'L' - size_unit = 'm' - - if size: - # LVCREATE(8) -l --extents option with percentage - if '%' in size: - size_parts = size.split('%', 1) - size_percent = int(size_parts[0]) - if size_percent > 100: - module.fail_json(msg="Size percentage cannot be larger than 100%") - size_whole = size_parts[1] - if size_whole == 'ORIGIN': - module.fail_json(msg="Snapshot Volumes are not supported") - elif size_whole not in ['VG', 'PVS', 'FREE']: - module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE") - size_opt = 'l' - size_unit = '' - - # LVCREATE(8) -L --size option unit - elif size[-1].isalpha(): - if size[-1] in 'bBsSkKmMgGtTpPeE': - size_unit = size[-1] - if size[0:-1].isdigit(): - size = int(size[0:-1]) - else: - module.fail_json(msg="Bad size specification for unit %s" % size_unit) - size_opt = 'L' - else: - module.fail_json(msg="Size unit should be one of [bBsSkKmMgGtTpPeE]") - # when no unit, megabytes by default - elif size.isdigit(): - size = int(size) - else: - module.fail_json(msg="Bad size specification") - - if size_opt == 'l': - unit = 'm' - else: - unit = size_unit - - rc, current_lvs, err = module.run_command( - "lvs --noheadings -o lv_name,size --units %s --separator ';' %s" % (unit, vg)) - - if rc != 0: - if state == 'absent': - module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg, stderr=False) - else: - module.fail_json(msg="Volume group %s does not exist." 
% vg, rc=rc, err=err) - - changed = False - - lvs = parse_lvs(current_lvs) - - for test_lv in lvs: - if test_lv['name'] == lv: - this_lv = test_lv - break - else: - this_lv = None - - if state == 'present' and not size: - if this_lv is None: - module.fail_json(msg="No size given.") - else: - module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) - - msg = '' - if this_lv is None: - if state == 'present': - ### create LV - if module.check_mode: - changed = True - else: - rc, _, err = module.run_command("lvcreate -n %s -%s %s%s %s" % (lv, size_opt, size, size_unit, vg)) - if rc == 0: - changed = True - else: - module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err) - else: - if state == 'absent': - ### remove LV - if module.check_mode: - module.exit_json(changed=True) - if not force: - module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name'])) - rc, _, err = module.run_command("lvremove --force %s/%s" % (vg, this_lv['name'])) - if rc == 0: - module.exit_json(changed=True) - else: - module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err) - - elif size_opt == 'l': - module.exit_json(changed=False, msg="Resizing extents with percentage not supported.") - else: - ### resize LV - tool = None - if size > this_lv['size']: - tool = 'lvextend' - elif size < this_lv['size']: - if not force: - module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name'])) - tool = 'lvreduce --force' - - if tool: - if module.check_mode: - changed = True - else: - rc, _, err = module.run_command("%s -%s %s%s %s/%s" % (tool, size_opt, size, size_unit, vg, this_lv['name'])) - if rc == 0: - changed = True - elif "matches existing size" in err: - module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) - else: - module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) - - module.exit_json(changed=changed, msg=msg) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/system/modprobe b/library/system/modprobe deleted file mode 100644 index 50c8f72fb2a..00000000000 --- a/library/system/modprobe +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, David Stygstra -# -# This file is part of Ansible -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - - -DOCUMENTATION = ''' ---- -module: modprobe -short_description: Add or remove kernel modules -requirements: [] -version_added: 1.4 -author: David Stygstra, Julien Dauphant, Matt Jeffery -description: - - Add or remove kernel modules. -options: - name: - required: true - description: - - Name of kernel module to manage. - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the module should be present or absent. 
- params: - required: false - default: "" - version_added: "1.6" - description: - - Modules parameters. -''' - -EXAMPLES = ''' -# Add the 802.1q module -- modprobe: name=8021q state=present -# Add the dummy module -- modprobe: name=dummy state=present params="numdummies=2" -''' - -def main(): - module = AnsibleModule( - argument_spec={ - 'name': {'required': True}, - 'state': {'default': 'present', 'choices': ['present', 'absent']}, - 'params': {'default': ''}, - }, - supports_check_mode=True, - ) - args = { - 'changed': False, - 'failed': False, - 'name': module.params['name'], - 'state': module.params['state'], - 'params': module.params['params'], - } - - # Check if module is present - try: - modules = open('/proc/modules') - present = False - module_name = args['name'].replace('-', '_') + ' ' - for line in modules: - if line.startswith(module_name): - present = True - break - modules.close() - except IOError, e: - module.fail_json(msg=str(e), **args) - - # Check only; don't modify - if module.check_mode: - if args['state'] == 'present' and not present: - changed = True - elif args['state'] == 'absent' and present: - changed = True - else: - changed = False - module.exit_json(changed=changed) - - # Add/remove module as needed - if args['state'] == 'present': - if not present: - rc, _, err = module.run_command(['modprobe', args['name'], args['params']]) - if rc != 0: - module.fail_json(msg=err, **args) - args['changed'] = True - elif args['state'] == 'absent': - if present: - rc, _, err = module.run_command(['rmmod', args['name']]) - if rc != 0: - module.fail_json(msg=err, **args) - args['changed'] = True - - module.exit_json(**args) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/mount b/library/system/mount deleted file mode 100755 index 9dc6fbe7b8c..00000000000 --- a/library/system/mount +++ /dev/null @@ -1,338 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Red Hat, inc -# Written by Seth Vidal -# based on the mount modules from salt and puppet -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: mount -short_description: Control active and configured mount points -description: - - This module controls active and configured mount points in C(/etc/fstab). -version_added: "0.6" -options: - name: - description: - - "path to the mount point, eg: C(/mnt/files)" - required: true - default: null - aliases: [] - src: - description: - - device to be mounted on I(name). 
- required: true - default: null - fstype: - description: - - file-system type - required: true - default: null - opts: - description: - - mount options (see fstab(8)) - required: false - default: null - dump: - description: - - dump (see fstab(8)) - required: false - default: null - passno: - description: - - passno (see fstab(8)) - required: false - default: null - state: - description: - - If C(mounted) or C(unmounted), the device will be actively mounted or unmounted - as needed and appropriately configured in I(fstab). - C(absent) and C(present) only deal with - I(fstab) but will not affect current mounting. If specifying C(mounted) and the mount - point is not present, the mount point will be created. Similarly, specifying C(absent) will remove the mount point directory. - required: true - choices: [ "present", "absent", "mounted", "unmounted" ] - default: null - fstab: - description: - - file to use instead of C(/etc/fstab). You shouldn't use that option - unless you really know what you are doing. This might be useful if - you need to configure mountpoints in a chroot environment. - required: false - default: /etc/fstab - -notes: [] -requirements: [] -author: Seth Vidal -''' -EXAMPLES = ''' -# Mount DVD read-only -- mount: name=/mnt/dvd src=/dev/sr0 fstype=iso9660 opts=ro state=present - -# Mount up device by label -- mount: name=/srv/disk src='LABEL=SOME_LABEL' fstype=ext4 state=present - -# Mount up device by UUID -- mount: name=/home src='UUID=b3e48f45-f933-4c8e-a700-22a159ec9077' fstype=xfs opts=noatime state=present -''' - - -def write_fstab(lines, dest): - - fs_w = open(dest, 'w') - for l in lines: - fs_w.write(l) - - fs_w.flush() - fs_w.close() - -def set_mount(**kwargs): - """ set/change a mount point location in fstab """ - - # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab - args = dict( - opts = 'defaults', - dump = '0', - passno = '0', - fstab = '/etc/fstab' - ) - args.update(kwargs) - - new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n' - - to_write = [] - exists = False - changed = False - for line in open(args['fstab'], 'r').readlines(): - if not line.strip(): - to_write.append(line) - continue - if line.strip().startswith('#'): - to_write.append(line) - continue - if len(line.split()) != 6: - # not sure what this is or why it is here - # but it is not our fault so leave it be - to_write.append(line) - continue - - ld = {} - ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() - - if ld['name'] != args['name']: - to_write.append(line) - continue - - # it exists - now see if what we have is different - exists = True - for t in ('src', 'fstype','opts', 'dump', 'passno'): - if ld[t] != args[t]: - changed = True - ld[t] = args[t] - - if changed: - to_write.append(new_line % ld) - else: - to_write.append(line) - - if not exists: - to_write.append(new_line % args) - changed = True - - if changed: - write_fstab(to_write, args['fstab']) - - return (args['name'], changed) - - -def unset_mount(**kwargs): - """ remove a mount point from fstab """ - - # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab - args = dict( - opts = 'default', - dump = '0', - passno = '0', - fstab = '/etc/fstab' - ) - args.update(kwargs) - - to_write = [] - changed = False - for line in open(args['fstab'], 'r').readlines(): - if not line.strip(): - to_write.append(line) - continue - if line.strip().startswith('#'): - to_write.append(line) - continue - if len(line.split()) != 6: - # not sure what this 
is or why it is here - # but it is not our fault so leave it be - to_write.append(line) - continue - - ld = {} - ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() - - if ld['name'] != args['name']: - to_write.append(line) - continue - - # if we got here we found a match - continue and mark changed - changed = True - - if changed: - write_fstab(to_write, args['fstab']) - - return (args['name'], changed) - - -def mount(module, **kwargs): - """ mount up a path or remount if needed """ - mount_bin = module.get_bin_path('mount') - - name = kwargs['name'] - if os.path.ismount(name): - cmd = [ mount_bin , '-o', 'remount', name ] - else: - cmd = [ mount_bin, name ] - - rc, out, err = module.run_command(cmd) - if rc == 0: - return 0, '' - else: - return rc, out+err - -def umount(module, **kwargs): - """ unmount a path """ - - umount_bin = module.get_bin_path('umount') - name = kwargs['name'] - cmd = [umount_bin, name] - - rc, out, err = module.run_command(cmd) - if rc == 0: - return 0, '' - else: - return rc, out+err - -def main(): - - module = AnsibleModule( - argument_spec = dict( - state = dict(required=True, choices=['present', 'absent', 'mounted', 'unmounted']), - name = dict(required=True), - opts = dict(default=None), - passno = dict(default=None), - dump = dict(default=None), - src = dict(required=True), - fstype = dict(required=True), - fstab = dict(default='/etc/fstab') - ) - ) - - - changed = False - rc = 0 - args = { - 'name': module.params['name'], - 'src': module.params['src'], - 'fstype': module.params['fstype'] - } - if module.params['passno'] is not None: - args['passno'] = module.params['passno'] - if module.params['opts'] is not None: - args['opts'] = module.params['opts'] - if ' ' in args['opts']: - module.fail_json(msg="unexpected space in 'opts' parameter") - if module.params['dump'] is not None: - args['dump'] = module.params['dump'] - if module.params['fstab'] is not None: - args['fstab'] = module.params['fstab'] - - # if fstab file does not exist, we first need to create it. This mainly - # happens when fstab optin is passed to the module. 
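- # (typically when the fstab option points inside a chroot that has not been populated yet)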
- if not os.path.exists(args['fstab']): - if not os.path.exists(os.path.dirname(args['fstab'])): - os.makedirs(os.path.dirname(args['fstab'])) - open(args['fstab'],'a').close() - - # absent == remove from fstab and unmounted - # unmounted == do not change fstab state, but unmount - # present == add to fstab, do not change mount state - # mounted == add to fstab if not there and make sure it is mounted, if it has changed in fstab then remount it - - state = module.params['state'] - name = module.params['name'] - if state == 'absent': - name, changed = unset_mount(**args) - if changed: - if os.path.ismount(name): - res,msg = umount(module, **args) - if res: - module.fail_json(msg="Error unmounting %s: %s" % (name, msg)) - - if os.path.exists(name): - try: - os.rmdir(name) - except (OSError, IOError), e: - module.fail_json(msg="Error rmdir %s: %s" % (name, str(e))) - - module.exit_json(changed=changed, **args) - - if state == 'unmounted': - if os.path.ismount(name): - res,msg = umount(module, **args) - if res: - module.fail_json(msg="Error unmounting %s: %s" % (name, msg)) - changed = True - - module.exit_json(changed=changed, **args) - - if state in ['mounted', 'present']: - if state == 'mounted': - if not os.path.exists(name): - try: - os.makedirs(name) - except (OSError, IOError), e: - module.fail_json(msg="Error making dir %s: %s" % (name, str(e))) - - name, changed = set_mount(**args) - if state == 'mounted': - res = 0 - if os.path.ismount(name): - if changed: - res,msg = mount(module, **args) - else: - changed = True - res,msg = mount(module, **args) - - if res: - module.fail_json(msg="Error mounting %s: %s" % (name, msg)) - - - module.exit_json(changed=changed, **args) - - module.fail_json(msg='Unexpected position reached') - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/ohai b/library/system/ohai deleted file mode 100644 index b50abc9db03..00000000000 --- a/library/system/ohai +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: ohai -short_description: Returns inventory data from I(Ohai) -description: - - Similar to the M(facter) module, this runs the I(Ohai) discovery program - (U(http://wiki.opscode.com/display/chef/Ohai)) on the remote host and - returns JSON inventory data. - I(Ohai) data is a bit more verbose and nested than I(facter). 
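- It simply executes the C(ohai) binary found on the remote host's C(PATH) and returns the parsed JSON output.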
-version_added: "0.6" -options: {} -notes: [] -requirements: [ "ohai" ] -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Retrieve (ohai) data from all web servers and store in one file per host -ansible webservers -m ohai --tree=/tmp/ohaidata -''' - -def main(): - module = AnsibleModule( - argument_spec = dict() - ) - cmd = ["/usr/bin/env", "ohai"] - rc, out, err = module.run_command(cmd, check_rc=True) - module.exit_json(**json.loads(out)) - -# import module snippets -from ansible.module_utils.basic import * - -main() - - diff --git a/library/system/open_iscsi b/library/system/open_iscsi deleted file mode 100644 index c661a723d77..00000000000 --- a/library/system/open_iscsi +++ /dev/null @@ -1,379 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Serge van Ginderachter -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: open_iscsi -author: Serge van Ginderachter -version_added: "1.4" -short_description: Manage iscsi targets with open-iscsi -description: - - Discover targets on given portal, (dis)connect targets, mark targets to - manually or auto start, return device nodes of connected targets. -requirements: - - open_iscsi library and tools (iscsiadm) -options: - portal: - required: false - aliases: [ip] - description: - - the ip address of the iscsi target - port: - required: false - default: 3260 - description: - - the port on which the iscsi target process listens - target: - required: false - aliases: [name, targetname] - description: - - the iscsi target name - login: - required: false - choices: [true, false] - description: - - whether the target node should be connected - node_auth: - required: false - default: CHAP - description: - - discovery.sendtargets.auth.authmethod - node_user: - required: false - description: - - discovery.sendtargets.auth.username - node_pass: - required: false - description: - - discovery.sendtargets.auth.password - auto_node_startup: - aliases: [automatic] - required: false - choices: [true, false] - description: - - whether the target node should be automatically connected at startup - discover: - required: false - choices: [true, false] - description: - - whether the list of target nodes on the portal should be - (re)discovered and added to the persistent iscsi database. - Keep in mind that iscsiadm discovery resets configuration, like node.startup - to manual, so combined with auto_node_startup=yes this will always return - a changed state.
- show_nodes: - required: false - choices: [true, false] - description: - - whether the list of nodes in the persistent iscsi database should be - returned by the module - -examples: - - description: perform a discovery on 10.1.2.3 and show available target - nodes - code: > - open_iscsi: show_nodes=yes discover=yes portal=10.1.2.3 - - description: discover targets on portal and login to the one available - (only works if exactly one target is exported to the initiator) - code: > - open_iscsi: portal={{iscsi_target}} login=yes discover=yes - - description: connect to the named target, after updating the local - persistent database (cache) - code: > - open_iscsi: login=yes target=iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d - - description: disconnect from the cached named target - code: > - open_iscsi: login=no target=iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d -''' - -import glob -import time - -ISCSIADM = 'iscsiadm' - -def compare_nodelists(l1, l2): - - l1.sort() - l2.sort() - return l1 == l2 - - -def iscsi_get_cached_nodes(module, portal=None): - - cmd = '%s --mode node' % iscsiadm_cmd - (rc, out, err) = module.run_command(cmd) - - if rc == 0: - lines = out.splitlines() - nodes = [] - for line in lines: - # line format is "ip:port,target_portal_group_tag targetname" - parts = line.split() - if len(parts) > 2: - module.fail_json(msg='error parsing output', cmd=cmd) - target = parts[1] - parts = parts[0].split(':') - target_portal = parts[0] - - if portal is None or portal == target_portal: - nodes.append(target) - - # older versions of iscsiadm don't have nice return codes - # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details - # err can contain [N|n]o records... - elif rc == 21 or (rc == 255 and "o records found" in err): - nodes = [] - else: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - return nodes - - -def iscsi_discover(module, portal, port): - - cmd = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - -def target_loggedon(module, target): - - cmd = '%s --mode session' % iscsiadm_cmd - (rc, out, err) = module.run_command(cmd) - - if rc == 0: - return target in out - elif rc == 21: - return False - else: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - -def target_login(module, target): - - node_auth = module.params['node_auth'] - node_user = module.params['node_user'] - node_pass = module.params['node_pass'] - - if node_user: - params = [('node.session.auth.authmethod', node_auth), - ('node.session.auth.username', node_user), - ('node.session.auth.password', node_pass)] - for (name, value) in params: - cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value) - (rc, out, err) = module.run_command(cmd) - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - -def target_logout(module, target): - - cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - -def target_device_node(module, target): - - # if anyone knows a better way to find out which devicenodes get created for - a given target...
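- # /dev/disk/by-path entries embed the target name, so globbing for it finds every block device this session exposed; os.path.realpath() then resolves each symlink to its canonical /dev node.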
- - devices = glob.glob('/dev/disk/by-path/*%s*' % target) - if len(devices) == 0: - return None - else: - devdisks = [] - for dev in devices: - # exclude partitions - if "-part" not in dev: - devdisk = os.path.realpath(dev) - # only add once (multi-path?) - if devdisk not in devdisks: - devdisks.append(devdisk) - return devdisks - - -def target_isauto(module, target): - - cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target) - (rc, out, err) = module.run_command(cmd) - - if rc == 0: - lines = out.splitlines() - for line in lines: - if 'node.startup' in line: - return 'automatic' in line - return False - else: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - -def target_setauto(module, target): - - cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - -def target_setmanual(module, target): - - cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - -def main(): - - # load ansible module object - module = AnsibleModule( - argument_spec = dict( - - # target - portal = dict(required=False, aliases=['ip']), - port = dict(required=False, default=3260), - target = dict(required=False, aliases=['name', 'targetname']), - node_auth = dict(required=False, default='CHAP'), - node_user = dict(required=False), - node_pass = dict(required=False), - - # actions - login = dict(type='bool', aliases=['state']), - auto_node_startup = dict(type='bool', aliases=['automatic']), - discover = dict(type='bool', default=False), - show_nodes = dict(type='bool', default=False) - ), - - required_together=[['discover_user', 'discover_pass'], - ['node_user', 'node_pass']], - supports_check_mode=True - ) - - global iscsiadm_cmd - iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True) - - # parameters - portal = module.params['portal'] - target = module.params['target'] - port = module.params['port'] - login = module.params['login'] - automatic = module.params['auto_node_startup'] - discover = module.params['discover'] - show_nodes = module.params['show_nodes'] - - check = module.check_mode - - cached = iscsi_get_cached_nodes(module, portal) - - # return json dict - result = {} - result['changed'] = False - - if discover: - if portal is None: - module.fail_json(msg = "Need to specify at least the portal (ip) to discover") - elif check: - nodes = cached - else: - iscsi_discover(module, portal, port) - nodes = iscsi_get_cached_nodes(module, portal) - if not compare_nodelists(cached, nodes): - result['changed'] |= True - result['cache_updated'] = True - else: - nodes = cached - - if login is not None or automatic is not None: - if target is None: - if len(nodes) > 1: - module.fail_json(msg = "Need to specify a target") - else: - target = nodes[0] - else: - # check given target is in cache - check_target = False - for node in nodes: - if node == target: - check_target = True - break - if not check_target: - module.fail_json(msg = "Specified target not found") - - if show_nodes: - result['nodes'] = nodes - - if login is not None: - loggedon = target_loggedon(module,target) - if (login and loggedon) or (not login and not loggedon): - result['changed'] |= False - if login: - result['devicenodes'] = target_device_node(module,target) - elif not check: - if login: - target_login(module, target) 
- # give udev some time - time.sleep(1) - result['devicenodes'] = target_device_node(module,target) - else: - target_logout(module, target) - result['changed'] |= True - result['connection_changed'] = True - else: - result['changed'] |= True - result['connection_changed'] = True - - if automatic is not None: - isauto = target_isauto(module, target) - if (automatic and isauto) or (not automatic and not isauto): - result['changed'] |= False - result['automatic_changed'] = False - elif not check: - if automatic: - target_setauto(module, target) - else: - target_setmanual(module, target) - result['changed'] |= True - result['automatic_changed'] = True - else: - result['changed'] |= True - result['automatic_changed'] = True - - module.exit_json(**result) - - - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/system/ping b/library/system/ping deleted file mode 100644 index b098d0054cd..00000000000 --- a/library/system/ping +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -DOCUMENTATION = ''' ---- -module: ping -version_added: historical -short_description: Try to connect to host and return C(pong) on success. -description: - - A trivial test module, this module always returns C(pong) on successful - contact. It does not make sense in playbooks, but it is useful from - C(/usr/bin/ansible) -options: {} -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Test 'webservers' status -ansible webservers -m ping -''' - -import exceptions - -def main(): - module = AnsibleModule( - argument_spec = dict( - data=dict(required=False, default=None), - ), - supports_check_mode = True - ) - result = dict(ping='pong') - if module.params['data']: - if module.params['data'] == 'crash': - raise exceptions.Exception("boom") - result['ping'] = module.params['data'] - module.exit_json(**result) - -from ansible.module_utils.basic import * - -main() - diff --git a/library/system/seboolean b/library/system/seboolean deleted file mode 100644 index 9799e71636a..00000000000 --- a/library/system/seboolean +++ /dev/null @@ -1,212 +0,0 @@ -#!/usr/bin/python - -# (c) 2012, Stephen Fromm -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: seboolean -short_description: Toggles SELinux booleans. -description: - - Toggles SELinux booleans. -version_added: "0.7" -options: - name: - description: - - Name of the boolean to configure - required: true - default: null - persistent: - description: - - Set to C(yes) if the boolean setting should survive a reboot - required: false - default: no - choices: [ "yes", "no" ] - state: - description: - - Desired boolean value - required: true - default: null - choices: [ 'yes', 'no' ] -notes: - - Not tested on any debian based system -requirements: [ ] -author: Stephen Fromm -''' - -EXAMPLES = ''' -# Set (httpd_can_network_connect) flag on and keep it persistent across reboots -- seboolean: name=httpd_can_network_connect state=yes persistent=yes -''' - -try: - import selinux - HAVE_SELINUX=True -except ImportError: - HAVE_SELINUX=False - -try: - import semanage - HAVE_SEMANAGE=True -except ImportError: - HAVE_SEMANAGE=False - -def has_boolean_value(module, name): - bools = [] - try: - rc, bools = selinux.security_get_boolean_names() - except OSError, e: - module.fail_json(msg="Failed to get list of boolean names") - if name in bools: - return True - else: - return False - -def get_boolean_value(module, name): - state = 0 - try: - state = selinux.security_get_boolean_active(name) - except OSError, e: - module.fail_json(msg="Failed to determine current state for boolean %s" % name) - if state == 1: - return True - else: - return False - -# The following method implements what setsebool.c does to change -# a boolean and make it persist after reboot.. -def semanage_boolean_value(module, name, state): - rc = 0 - value = 0 - if state: - value = 1 - handle = semanage.semanage_handle_create() - if handle is None: - module.fail_json(msg="Failed to create semanage library handle") - try: - managed = semanage.semanage_is_managed(handle) - if managed < 0: - module.fail_json(msg="Failed to determine whether policy is manage") - if managed == 0: - if os.getuid() == 0: - module.fail_json(msg="Cannot set persistent booleans without managed policy") - else: - module.fail_json(msg="Cannot set persistent booleans; please try as root") - if semanage.semanage_connect(handle) < 0: - module.fail_json(msg="Failed to connect to semanage") - - if semanage.semanage_begin_transaction(handle) < 0: - module.fail_json(msg="Failed to begin semanage transaction") - - rc, sebool = semanage.semanage_bool_create(handle) - if rc < 0: - module.fail_json(msg="Failed to create seboolean with semanage") - if semanage.semanage_bool_set_name(handle, sebool, name) < 0: - module.fail_json(msg="Failed to set seboolean name with semanage") - semanage.semanage_bool_set_value(sebool, value) - - rc, boolkey = semanage.semanage_bool_key_extract(handle, sebool) - if rc < 0: - module.fail_json(msg="Failed to extract boolean key with semanage") - - if semanage.semanage_bool_modify_local(handle, boolkey, sebool) < 0: - module.fail_json(msg="Failed to modify boolean key with semanage") - - if semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0: - module.fail_json(msg="Failed to set boolean key active with semanage") - - semanage.semanage_bool_key_free(boolkey) - semanage.semanage_bool_free(sebool) - - semanage.semanage_set_reload(handle, 0) - if semanage.semanage_commit(handle) < 0: - module.fail_json(msg="Failed to commit changes to semanage") - - semanage.semanage_disconnect(handle) - semanage.semanage_handle_destroy(handle) - except Exception, e: - module.fail_json(msg="Failed to manage policy 
for boolean %s: %s" % (name, str(e))) - return True - -def set_boolean_value(module, name, state): - rc = 0 - value = 0 - if state: - value = 1 - try: - rc = selinux.security_set_boolean(name, value) - except OSError, e: - module.fail_json(msg="Failed to set boolean %s to %s" % (name, value)) - if rc == 0: - return True - else: - return False - -def main(): - module = AnsibleModule( - argument_spec = dict( - name=dict(required=True), - persistent=dict(default='no', type='bool'), - state=dict(required=True, type='bool') - ), - supports_check_mode=True - ) - - if not HAVE_SELINUX: - module.fail_json(msg="This module requires libselinux-python support") - - if not HAVE_SEMANAGE: - module.fail_json(msg="This module requires libsemanage-python support") - - if not selinux.is_selinux_enabled(): - module.fail_json(msg="SELinux is disabled on this host.") - - name = module.params['name'] - persistent = module.params['persistent'] - state = module.params['state'] - result = {} - result['name'] = name - - if not has_boolean_value(module, name): - module.fail_json(msg="SELinux boolean %s does not exist." % name) - - cur_value = get_boolean_value(module, name) - - if cur_value == state: - result['state'] = cur_value - result['changed'] = False - module.exit_json(**result) - - if module.check_mode: - module.exit_json(changed=True) - if persistent: - r = semanage_boolean_value(module, name, state) - else: - r = set_boolean_value(module, name, state) - - result['changed'] = r - if not r: - module.fail_json(msg="Failed to set boolean %s to %s" % (name, value)) - try: - selinux.security_commit_booleans() - except: - module.fail_json(msg="Failed to commit pending boolean %s value" % name) - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/selinux b/library/system/selinux deleted file mode 100644 index 53e53d1d49c..00000000000 --- a/library/system/selinux +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Derek Carter -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: selinux -short_description: Change policy and state of SELinux -description: - - Configures the SELinux mode and policy. A reboot may be required after usage. Ansible will not issue this reboot but will let you know when it is required. 
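- The pending action is reported in the module's C(msg) result (for example C(reboot to change the loaded policy)).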
-version_added: "0.7" -options: - policy: - description: - - "name of the SELinux policy to use (example: C(targeted)) will be required if state is not C(disabled)" - required: false - default: null - state: - description: - - The SELinux mode - required: true - default: null - choices: [ "enforcing", "permissive", "disabled" ] - conf: - description: - - path to the SELinux configuration file, if non-standard - required: false - default: "/etc/selinux/config" -notes: - - Not tested on any debian based system -requirements: [ libselinux-python ] -author: Derek Carter -''' - -EXAMPLES = ''' -- selinux: policy=targeted state=enforcing -- selinux: policy=targeted state=permissive -- selinux: state=disabled -''' - -import os -import re -import sys - -try: - import selinux -except ImportError: - print "failed=True msg='libselinux-python required for this module'" - sys.exit(1) - -# getter subroutines -def get_config_state(configfile): - myfile = open(configfile, "r") - lines = myfile.readlines() - myfile.close() - for line in lines: - stateline = re.match('^SELINUX=.*$', line) - if (stateline): - return(line.split('=')[1].strip()) - -def get_config_policy(configfile): - myfile = open(configfile, "r") - lines = myfile.readlines() - myfile.close() - for line in lines: - stateline = re.match('^SELINUXTYPE=.*$', line) - if (stateline): - return(line.split('=')[1].strip()) - -# setter subroutines -def set_config_state(state, configfile): - #SELINUX=permissive - # edit config file with state value - stateline='SELINUX=%s' % state - myfile = open(configfile, "r") - lines = myfile.readlines() - myfile.close() - myfile = open(configfile, "w") - for line in lines: - myfile.write(re.sub(r'^SELINUX=.*', stateline, line)) - myfile.close() - -def set_state(state): - if (state == 'enforcing'): - selinux.security_setenforce(1) - elif (state == 'permissive'): - selinux.security_setenforce(0) - elif (state == 'disabled'): - pass - else: - msg = 'trying to set invalid runtime state %s' % state - module.fail_json(msg=msg) - -def set_config_policy(policy, configfile): - # edit config file with state value - #SELINUXTYPE=targeted - policyline='SELINUXTYPE=%s' % policy - myfile = open(configfile, "r") - lines = myfile.readlines() - myfile.close() - myfile = open(configfile, "w") - for line in lines: - myfile.write(re.sub(r'^SELINUXTYPE=.*', policyline, line)) - myfile.close() - -def main(): - - module = AnsibleModule( - argument_spec = dict( - policy=dict(required=False), - state=dict(choices=['enforcing', 'permissive', 'disabled'], required=True), - configfile=dict(aliases=['conf','file'], default='/etc/selinux/config') - ), - supports_check_mode=True - ) - - # global vars - changed=False - msgs = [] - configfile = module.params['configfile'] - policy = module.params['policy'] - state = module.params['state'] - runtime_enabled = selinux.is_selinux_enabled() - runtime_policy = selinux.selinux_getpolicytype()[1] - runtime_state = 'disabled' - if (runtime_enabled): - # enabled means 'enforcing' or 'permissive' - if (selinux.security_getenforce()): - runtime_state = 'enforcing' - else: - runtime_state = 'permissive' - config_policy = get_config_policy(configfile) - config_state = get_config_state(configfile) - - # check to see if policy is set if state is not 'disabled' - if (state != 'disabled'): - if not policy: - module.fail_json(msg='policy is required if state is not \'disabled\'') - else: - if not policy: - policy = config_policy - - # check changed values and run changes - if (policy != runtime_policy): - if 
module.check_mode: - module.exit_json(changed=True) - # cannot change runtime policy - msgs.append('reboot to change the loaded policy') - changed=True - - if (policy != config_policy): - if module.check_mode: - module.exit_json(changed=True) - msgs.append('config policy changed from \'%s\' to \'%s\'' % (config_policy, policy)) - set_config_policy(policy, configfile) - changed=True - - if (state != runtime_state): - if module.check_mode: - module.exit_json(changed=True) - if (state == 'disabled'): - msgs.append('state change will take effect next reboot') - else: - if (runtime_enabled): - set_state(state) - msgs.append('runtime state changed from \'%s\' to \'%s\'' % (runtime_state, state)) - else: - msgs.append('state change will take effect next reboot') - changed=True - - if (state != config_state): - if module.check_mode: - module.exit_json(changed=True) - msgs.append('config state changed from \'%s\' to \'%s\'' % (config_state, state)) - set_config_state(state, configfile) - changed=True - - module.exit_json(changed=changed, msg=', '.join(msgs), - configfile=configfile, - policy=policy, state=state) - -################################################# -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/system/service b/library/system/service deleted file mode 100644 index 2da5e53b01f..00000000000 --- a/library/system/service +++ /dev/null @@ -1,1266 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: service -author: Michael DeHaan -version_added: "0.1" -short_description: Manage services. -description: - - Controls services on remote hosts. -options: - name: - required: true - description: - - Name of the service. - state: - required: false - choices: [ started, stopped, restarted, reloaded ] - description: - - C(started)/C(stopped) are idempotent actions that will not run - commands unless necessary. C(restarted) will always bounce the - service. C(reloaded) will always reload. B(At least one of state - and enabled are required.) - sleep: - required: false - version_added: "1.3" - description: - - If the service is being C(restarted) then sleep this many seconds - between the stop and start command. This helps to workaround badly - behaving init scripts that exit immediately after signaling a process - to stop. - pattern: - required: false - version_added: "0.7" - description: - - If the service does not respond to the status command, name a - substring to look for as would be found in the output of the I(ps) - command as a stand-in for a status result. If the string is found, - the service will be assumed to be running. - enabled: - required: false - choices: [ "yes", "no" ] - description: - - Whether the service should start on boot. B(At least one of state and - enabled are required.) 
- - runlevel: - required: false - default: 'default' - description: - - "For OpenRC init scripts (ex: Gentoo) only. The runlevel that this service belongs to." - arguments: - description: - - Additional arguments provided on the command line - aliases: [ 'args' ] -''' - -EXAMPLES = ''' -# Example action to start service httpd, if not running -- service: name=httpd state=started - -# Example action to stop service httpd, if running -- service: name=httpd state=stopped - -# Example action to restart service httpd, in all cases -- service: name=httpd state=restarted - -# Example action to reload service httpd, in all cases -- service: name=httpd state=reloaded - -# Example action to enable service httpd, and not touch the running state -- service: name=httpd enabled=yes - -# Example action to start service foo, based on running process /usr/bin/foo -- service: name=foo pattern=/usr/bin/foo state=started - -# Example action to restart network service for interface eth0 -- service: name=network state=restarted args=eth0 -''' - -import platform -import os -import re -import tempfile -import shlex -import select -import time -import string - -from distutils.version import LooseVersion - -class Service(object): - """ - This is the generic Service manipulation class that is subclassed - based on platform. - - A subclass should override the following action methods:- - - get_service_tools - - service_enable - - get_service_status - - service_control - - All subclasses MUST define platform and distribution (which may be None). - """ - - platform = 'Generic' - distribution = None - - def __new__(cls, *args, **kwargs): - return load_platform_subclass(Service, args, kwargs) - - def __init__(self, module): - self.module = module - self.name = module.params['name'] - self.state = module.params['state'] - self.sleep = module.params['sleep'] - self.pattern = module.params['pattern'] - self.enable = module.params['enabled'] - self.runlevel = module.params['runlevel'] - self.changed = False - self.running = None - self.crashed = None - self.action = None - self.svc_cmd = None - self.svc_initscript = None - self.svc_initctl = None - self.enable_cmd = None - self.arguments = module.params.get('arguments', '') - self.rcconf_file = None - self.rcconf_key = None - self.rcconf_value = None - self.svc_change = False - - # select whether we dump additional debug info through syslog - self.syslogging = False - - # =========================================== - # Platform specific methods (must be replaced by subclass). - - def get_service_tools(self): - self.module.fail_json(msg="get_service_tools not implemented on target platform") - - def service_enable(self): - self.module.fail_json(msg="service_enable not implemented on target platform") - - def get_service_status(self): - self.module.fail_json(msg="get_service_status not implemented on target platform") - - def service_control(self): - self.module.fail_json(msg="service_control not implemented on target platform") - - # =========================================== - # Generic methods that should be used on all platforms. - - def execute_command(self, cmd, daemonize=False): - if self.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'Command %s, daemonize %r' % (cmd, daemonize)) - - # Most things don't need to be daemonized - if not daemonize: - return self.module.run_command(cmd) - - # This is complex because daemonization is hard for people. 
- # What we do is daemonize a part of this module, the daemon runs the - # command, picks up the return code and output, and returns it to the - # main process. - pipe = os.pipe() - pid = os.fork() - if pid == 0: - os.close(pipe[0]) - # Set stdin/stdout/stderr to /dev/null - fd = os.open(os.devnull, os.O_RDWR) - if fd != 0: - os.dup2(fd, 0) - if fd != 1: - os.dup2(fd, 1) - if fd != 2: - os.dup2(fd, 2) - if fd not in (0, 1, 2): - os.close(fd) - - # Make us a daemon. Yes, that's all it takes. - pid = os.fork() - if pid > 0: - os._exit(0) - os.setsid() - os.chdir("/") - pid = os.fork() - if pid > 0: - os._exit(0) - - # Start the command - if isinstance(cmd, basestring): - cmd = shlex.split(cmd) - p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1])) - stdout = "" - stderr = "" - fds = [p.stdout, p.stderr] - # Wait for all output, or until the main process is dead and its output is done. - while fds: - rfd, wfd, efd = select.select(fds, [], fds, 1) - if not (rfd + wfd + efd) and p.poll() is not None: - break - if p.stdout in rfd: - dat = os.read(p.stdout.fileno(), 4096) - if not dat: - fds.remove(p.stdout) - stdout += dat - if p.stderr in rfd: - dat = os.read(p.stderr.fileno(), 4096) - if not dat: - fds.remove(p.stderr) - stderr += dat - p.wait() - # Return a JSON blob to parent - os.write(pipe[1], json.dumps([p.returncode, stdout, stderr])) - os.close(pipe[1]) - os._exit(0) - elif pid == -1: - self.module.fail_json(msg="unable to fork") - else: - os.close(pipe[1]) - os.waitpid(pid, 0) - # Wait for data from daemon process and process it. - data = "" - while True: - rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]]) - if pipe[0] in rfd: - dat = os.read(pipe[0], 4096) - if not dat: - break - data += dat - return json.loads(data) - - def check_ps(self): - # Set ps flags - if platform.system() == 'SunOS': - psflags = '-ef' - else: - psflags = 'auxww' - - # Find ps binary - psbin = self.module.get_bin_path('ps', True) - - (rc, psout, pserr) = self.execute_command('%s %s' % (psbin, psflags)) - # If rc is 0, set running as appropriate - if rc == 0: - self.running = False - lines = psout.split("\n") - for line in lines: - if self.pattern in line and not "pattern=" in line: - # so as to not confuse ./hacking/test-module - self.running = True - break - - def check_service_changed(self): - if self.state and self.running is None: - self.module.fail_json(msg="failed determining service state, possible typo of service name?") - # Find out if state has changed - if not self.running and self.state in ["started", "running", "reloaded"]: - self.svc_change = True - elif self.running and self.state in ["stopped","reloaded"]: - self.svc_change = True - elif self.state == "restarted": - self.svc_change = True - if self.module.check_mode and self.svc_change: - self.module.exit_json(changed=True, msg='service state changed') - - def modify_service_state(self): - - # Only do something if state will change - if self.svc_change: - # Control service - if self.state in ['started', 'running']: - self.action = "start" - elif not self.running and self.state == 'reloaded': - self.action = "start" - elif self.state == 'stopped': - self.action = "stop" - elif self.state == 'reloaded': - self.action = "reload" - elif self.state == 'restarted': - self.action = "restart" - - if self.module.check_mode: - self.module.exit_json(changed=True, msg='changing service state') - - return self.service_control() - - else: - # If nothing needs to change just say all 
is well - rc = 0 - err = '' - out = '' - return rc, out, err - - def service_enable_rcconf(self): - if self.rcconf_file is None or self.rcconf_key is None or self.rcconf_value is None: - self.module.fail_json(msg="service_enable_rcconf() requires rcconf_file, rcconf_key and rcconf_value") - - self.changed = None - entry = '%s="%s"\n' % (self.rcconf_key, self.rcconf_value) - RCFILE = open(self.rcconf_file, "r") - new_rc_conf = [] - - # Build a list containing the possibly modified file. - for rcline in RCFILE: - # Parse line removing whitespaces, quotes, etc. - rcarray = shlex.split(rcline, comments=True) - if len(rcarray) >= 1 and '=' in rcarray[0]: - (key, value) = rcarray[0].split("=", 1) - if key == self.rcconf_key: - if value.upper() == self.rcconf_value: - # Since the proper entry already exists we can stop iterating. - self.changed = False - break - else: - # We found the key but the value is wrong, replace with new entry. - rcline = entry - self.changed = True - - # Add line to the list. - new_rc_conf.append(rcline) - - # We are done with reading the current rc.conf, close it. - RCFILE.close() - - # If we did not see any trace of our entry we need to add it. - if self.changed is None: - new_rc_conf.append(entry) - self.changed = True - - if self.changed is True: - - if self.module.check_mode: - self.module.exit_json(changed=True, msg="changing service enablement") - - # Create a temporary file next to the current rc.conf (so we stay on the same filesystem). - # This way the replacement operation is atomic. - rcconf_dir = os.path.dirname(self.rcconf_file) - rcconf_base = os.path.basename(self.rcconf_file) - (TMP_RCCONF, tmp_rcconf_file) = tempfile.mkstemp(dir=rcconf_dir, prefix="%s-" % rcconf_base) - - # Write out the contents of the list into our temporary file. - for rcline in new_rc_conf: - os.write(TMP_RCCONF, rcline) - - # Close temporary file. - os.close(TMP_RCCONF) - - # Replace previous rc.conf. - self.module.atomic_move(tmp_rcconf_file, self.rcconf_file) - -# =========================================== -# Subclass: Linux - -class LinuxService(Service): - """ - This is the Linux Service manipulation class - it is currently supporting - a mixture of binaries and init scripts for controlling services started at - boot, as well as for controlling the current state. 
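The service_enable_rcconf() helper above is the heart of the BSD-style enablement flow: scan an rc.conf-style file for key=value pairs, rewrite the line when the value differs, append the entry when the key was never seen, and swap the result in atomically via a temporary file created in the same directory. A minimal standalone sketch of that pattern (the helper name and the bare os.replace() are mine; the module itself goes through module.atomic_move()):

    import os
    import shlex
    import tempfile

    def set_rcconf_entry(path, key, value):
        """Idempotently set key="value" in an rc.conf-style file, atomically."""
        entry = '%s="%s"\n' % (key, value)
        changed = None
        new_lines = []
        with open(path) as f:
            for line in f:
                fields = shlex.split(line, comments=True)
                if fields and '=' in fields[0]:
                    k, v = fields[0].split('=', 1)
                    if k == key:
                        if v.upper() == value.upper():
                            changed = False      # correct entry already present
                        else:
                            line = entry         # wrong value: rewrite this line
                            changed = True
                new_lines.append(line)
        if changed is None:                      # key never seen: append it
            new_lines.append(entry)
            changed = True
        if changed:
            # temp file in the same directory, so the final rename is atomic
            fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
            with os.fdopen(fd, 'w') as f:
                f.writelines(new_lines)
            os.replace(tmp, path)
        return changed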
- """ - - platform = 'Linux' - distribution = None - - def get_service_tools(self): - - paths = [ '/sbin', '/usr/sbin', '/bin', '/usr/bin' ] - binaries = [ 'service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart' ] - initpaths = [ '/etc/init.d' ] - location = dict() - - for binary in binaries: - location[binary] = self.module.get_bin_path(binary) - - def check_systemd(name): - # verify service is managed by systemd - if not location.get('systemctl', None): - return False - - # default to .service if the unit type is not specified - if name.find('.') > 0: - unit_name, unit_type = name.rsplit('.', 1) - if unit_type not in ("service", "socket", "device", "mount", "automount", - "swap", "target", "path", "timer", "snapshot"): - name = "%s.service" % name - else: - name = "%s.service" % name - - rc, out, err = self.execute_command("%s list-unit-files" % (location['systemctl'])) - - # adjust the service name to account for template service unit files - index = name.find('@') - if index != -1: - template_name = name[:index+1] - else: - template_name = name - - self.__systemd_unit = None - for line in out.splitlines(): - if line.startswith(template_name): - self.__systemd_unit = name - return True - return False - - # Locate a tool for enable options - if location.get('chkconfig', None) and os.path.exists("/etc/init.d/%s" % self.name): - if check_systemd(self.name): - # service is managed by systemd - self.enable_cmd = location['systemctl'] - else: - # we are using a standard SysV service - self.enable_cmd = location['chkconfig'] - elif location.get('update-rc.d', None): - if check_systemd(self.name): - # service is managed by systemd - self.enable_cmd = location['systemctl'] - elif location['initctl'] and os.path.exists("/etc/init/%s.conf" % self.name): - # service is managed by upstart - self.enable_cmd = location['initctl'] - elif location['update-rc.d'] and os.path.exists("/etc/init.d/%s" % self.name): - # service is managed by with SysV init scripts, but with update-rc.d - self.enable_cmd = location['update-rc.d'] - else: - self.module.fail_json(msg="service not found: %s" % self.name) - elif location.get('rc-service', None) and not location.get('systemctl', None): - # service is managed by OpenRC - self.svc_cmd = location['rc-service'] - self.enable_cmd = location['rc-update'] - return - elif check_systemd(self.name): - # service is managed by systemd - self.enable_cmd = location['systemctl'] - elif location['initctl'] and os.path.exists("/etc/init/%s.conf" % self.name): - # service is managed by upstart - self.enable_cmd = location['initctl'] - - # if this service is managed via upstart, get the current upstart version - if self.enable_cmd == location['initctl']: - # default the upstart version to something we can compare against - self.upstart_version = LooseVersion('0.0.0') - try: - # set the upstart version based on the output of 'initctl version' - version_re = re.compile(r'\(upstart (.*)\)') - rc,stdout,stderr = self.module.run_command('initctl version') - if rc == 0: - res = version_re.search(stdout) - if res: - self.upstart_version = LooseVersion(res.groups()[0]) - except: - # we'll use the default of 0.0.0 since we couldn't - # detect the current upstart version above - pass - - # Locate a tool for runtime service management (start, stop etc.) 
- if location.get('service', None) and os.path.exists("/etc/init.d/%s" % self.name): - # SysV init script - self.svc_cmd = location['service'] - elif location.get('start', None) and os.path.exists("/etc/init/%s.conf" % self.name): - # upstart -- rather than being managed by one command, start/stop/restart are actual commands - self.svc_cmd = '' - else: - # still a SysV init script, but /sbin/service isn't installed - for initdir in initpaths: - initscript = "%s/%s" % (initdir,self.name) - if os.path.isfile(initscript): - self.svc_initscript = initscript - - # couldn't find anything yet, assume systemd - if self.svc_cmd is None and self.svc_initscript is None: - if location.get('systemctl'): - self.svc_cmd = location['systemctl'] - - if self.svc_cmd is None and not self.svc_initscript: - self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting') - - if location.get('initctl', None): - self.svc_initctl = location['initctl'] - - def get_systemd_status_dict(self): - (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit,)) - if rc != 0: - self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err)) - return dict(line.split('=', 1) for line in out.splitlines()) - - def get_systemd_service_status(self): - d = self.get_systemd_status_dict() - if d.get('ActiveState') == 'active': - # run-once services (for which a single successful exit indicates - # that they are running as designed) should not be restarted here. - # Thus, we are not checking d['SubState']. - self.running = True - self.crashed = False - elif d.get('ActiveState') == 'failed': - self.running = False - self.crashed = True - elif d.get('ActiveState') is None: - self.module.fail_json(msg='No ActiveState value in systemctl show output for %r' % (self.__systemd_unit,)) - else: - self.running = False - self.crashed = False - return self.running - - def get_service_status(self): - if self.svc_cmd and self.svc_cmd.endswith('systemctl'): - return self.get_systemd_service_status() - - self.action = "status" - rc, status_stdout, status_stderr = self.service_control() - - # if we have decided the service is managed by upstart, we check for some additional output... 
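get_systemd_status_dict() above relies on 'systemctl show' emitting one KEY=VALUE pair per line, and get_systemd_service_status() then keys off ActiveState alone, deliberately ignoring SubState so that run-once services are not needlessly restarted. The parsing step in isolation, as a hedged sketch (check=True stands in for the module's fail_json):

    import subprocess

    def systemd_show(unit):
        """Parse `systemctl show <unit>` KEY=VALUE output into a dict."""
        proc = subprocess.run(["systemctl", "show", unit],
                              capture_output=True, text=True, check=True)
        return dict(line.split('=', 1) for line in proc.stdout.splitlines())

    # ActiveState mapping used above:
    #   'active' -> running, 'failed' -> crashed, anything else -> stopped
    state = systemd_show("sshd.service").get("ActiveState")
    running, crashed = state == "active", state == "failed"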
- if self.svc_initctl and self.running is None: - # check the job status by upstart response - initctl_rc, initctl_status_stdout, initctl_status_stderr = self.execute_command("%s status %s" % (self.svc_initctl, self.name)) - if "stop/waiting" in initctl_status_stdout: - self.running = False - elif "start/running" in initctl_status_stdout: - self.running = True - - if self.svc_cmd and self.svc_cmd.endswith("rc-service") and self.running is None: - openrc_rc, openrc_status_stdout, openrc_status_stderr = self.execute_command("%s %s status" % (self.svc_cmd, self.name)) - self.running = "started" in openrc_status_stdout - self.crashed = "crashed" in openrc_status_stderr - - # if the job status is still not known check it by status output keywords - if self.running is None: - # first transform the status output that could irritate keyword matching - cleanout = status_stdout.lower().replace(self.name.lower(), '') - if "stop" in cleanout: - self.running = False - elif "run" in cleanout and "not" in cleanout: - self.running = False - elif "run" in cleanout and "not" not in cleanout: - self.running = True - elif "start" in cleanout and "not" not in cleanout: - self.running = True - elif 'could not access pid file' in cleanout: - self.running = False - elif 'is dead and pid file exists' in cleanout: - self.running = False - elif 'dead but subsys locked' in cleanout: - self.running = False - elif 'dead but pid file exists' in cleanout: - self.running = False - - # if the job status is still not known check it by response code - # For reference, see: - # http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html - if self.running is None: - if rc in [1, 2, 3, 4, 69]: - self.running = False - elif rc == 0: - self.running = True - - # if the job status is still not known check it by special conditions - if self.running is None: - if self.name == 'iptables' and "ACCEPT" in status_stdout: - # iptables status command output is lame - # TODO: lookup if we can use a return code for this instead? 
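For plain init scripts the module falls back to the heuristic cascade above: strip the service name out of the status text, look for stop/run/start keywords, then for a handful of known pid-file phrases, and only then consult the LSB status exit codes. Approximately, with the function name mine:

    def running_from_status(name, rc, stdout):
        """Best-effort guess whether an init script's service is running."""
        # strip the service name so it cannot irritate the keyword matching
        cleaned = stdout.lower().replace(name.lower(), '')
        if 'stop' in cleaned:
            return False
        if 'run' in cleaned:
            return 'not' not in cleaned
        if 'start' in cleaned and 'not' not in cleaned:
            return True
        for phrase in ('could not access pid file',
                       'is dead and pid file exists',
                       'dead but subsys locked',
                       'dead but pid file exists'):
            if phrase in cleaned:
                return False
        # fall back to LSB init script status codes:
        # 0 == running, 1-4 (and 69) == not running
        if rc in (1, 2, 3, 4, 69):
            return False
        if rc == 0:
            return True
        return None   # still unknown; the module then tries special cases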
- self.running = True - - return self.running - - - def service_enable(self): - - if self.enable_cmd is None: - self.module.fail_json(msg='cannot detect command to enable service %s, typo or init system potentially unknown' % self.name) - - # FIXME: we use chkconfig or systemctl - # to decide whether to run the command here but need something - # similar for upstart - - if self.enable_cmd.endswith("initctl"): - def write_to_override_file(file_name, file_contents, ): - override_file = open(file_name, 'w') - override_file.write(file_contents) - override_file.close() - - initpath = '/etc/init' - if self.upstart_version >= LooseVersion('0.6.7'): - manreg = re.compile('^manual\s*$', re.M | re.I) - config_line = 'manual\n' - else: - manreg = re.compile('^start on manual\s*$', re.M | re.I) - config_line = 'start on manual\n' - conf_file_name = "%s/%s.conf" % (initpath, self.name) - override_file_name = "%s/%s.override" % (initpath, self.name) - - # Check to see if files contain the manual line in .conf and fail if True - if manreg.search(open(conf_file_name).read()): - self.module.fail_json(msg="manual stanza not supported in a .conf file") - - if os.path.exists(override_file_name): - override_file_contents = open(override_file_name).read() - # Remove manual stanza if present and service enabled - if self.enable and manreg.search(override_file_contents): - write_to_override_file(override_file_name, manreg.sub('', override_file_contents)) - # Add manual stanza if not present and service disabled - elif not (self.enable) and not (manreg.search(override_file_contents)): - write_to_override_file(override_file_name, override_file_contents + '\n' + config_line) - else: - return - # Add file with manual stanza if service disabled - elif not (self.enable): - write_to_override_file(override_file_name, config_line) - else: - return - - if self.enable_cmd.endswith("chkconfig"): - (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name)) - if 'chkconfig --add %s' % self.name in err: - self.execute_command("%s --add %s" % (self.enable_cmd, self.name)) - (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name)) - if not self.name in out: - self.module.fail_json(msg="service %s does not support chkconfig" % self.name) - state = out.split()[-1] - if self.enable and ( "3:on" in out and "5:on" in out ): - return - elif not self.enable and ( "3:off" in out and "5:off" in out ): - return - - if self.enable_cmd.endswith("systemctl"): - d = self.get_systemd_status_dict() - if "UnitFileState" in d: - if self.enable and d["UnitFileState"] == "enabled": - return - elif not self.enable and d["UnitFileState"] == "disabled": - return - elif not self.enable: - return - - if self.enable_cmd.endswith("rc-update"): - (rc, out, err) = self.execute_command("%s show" % self.enable_cmd) - for line in out.splitlines(): - service_name, runlevels = line.split('|') - service_name = service_name.strip() - if service_name != self.name: - continue - runlevels = re.split(r'\s+', runlevels) - # service already enabled for the runlevel - if self.enable and self.runlevel in runlevels: - return - # service already disabled for the runlevel - elif not self.enable and self.runlevel not in runlevels: - return - break - else: - # service already disabled altogether - if not self.enable: - return - - if self.enable_cmd.endswith("update-rc.d"): - if self.enable: - action = 'enable' - else: - action = 'disable' - - (rc, out, err) = self.execute_command("%s -n %s %s" \ - % (self.enable_cmd, 
self.name, action)) - self.changed = False - for line in out.splitlines(): - if line.startswith('rename'): - self.changed = True - break - elif self.enable and 'do not exist' in line: - self.changed = True - break - elif not self.enable and 'already exist' in line: - self.changed = True - break - - # Debian compatibility - for line in err.splitlines(): - if self.enable and 'no runlevel symlinks to modify' in line: - self.changed = True - break - - if self.module.check_mode: - self.module.exit_json(changed=self.changed) - - if not self.changed: - return - - if self.enable: - # make sure the init.d symlinks are created - # otherwise enable might not work - (rc, out, err) = self.execute_command("%s %s defaults" \ - % (self.enable_cmd, self.name)) - if rc != 0: - return (rc, out, err) - - return self.execute_command("%s %s enable" % (self.enable_cmd, self.name)) - else: - return self.execute_command("%s %s disable" % (self.enable_cmd, - self.name)) - - # we change argument depending on real binary used: - # - update-rc.d and systemctl wants enable/disable - # - chkconfig wants on/off - # - rc-update wants add/delete - # also, rc-update and systemctl needs the argument order reversed - if self.enable: - on_off = "on" - enable_disable = "enable" - add_delete = "add" - else: - on_off = "off" - enable_disable = "disable" - add_delete = "delete" - - if self.enable_cmd.endswith("rc-update"): - args = (self.enable_cmd, add_delete, self.name + " " + self.runlevel) - elif self.enable_cmd.endswith("systemctl"): - args = (self.enable_cmd, enable_disable, self.__systemd_unit) - else: - args = (self.enable_cmd, self.name, on_off) - - self.changed = True - - if self.module.check_mode and self.changed: - self.module.exit_json(changed=True) - - return self.execute_command("%s %s %s" % args) - - - def service_control(self): - - # Decide what command to run - svc_cmd = '' - arguments = self.arguments - if self.svc_cmd: - if not self.svc_cmd.endswith("systemctl"): - # SysV and OpenRC take the form - svc_cmd = "%s %s" % (self.svc_cmd, self.name) - else: - # systemd commands take the form - svc_cmd = self.svc_cmd - arguments = "%s %s" % (self.__systemd_unit, arguments) - elif self.svc_initscript: - # upstart - svc_cmd = "%s" % self.svc_initscript - - # In OpenRC, if a service crashed, we need to reset its status to - # stopped with the zap command, before we can start it back. - if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed: - self.execute_command("%s zap" % svc_cmd, daemonize=True) - - if self.action is not "restart": - if svc_cmd != '': - # upstart or systemd or OpenRC - rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True) - else: - # SysV - rc_state, stdout, stderr = self.execute_command("%s %s %s" % (self.action, self.name, arguments), daemonize=True) - elif self.svc_cmd and self.svc_cmd.endswith('rc-service'): - # All services in OpenRC support restart. - rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True) - else: - # In other systems, not all services support restart. Do it the hard way. 
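The tail of service_enable() illustrates why a generic service module is messy: every enablement tool wants its own verb, and two of them also put the verb before the service name. Reduced to a table (structure and helper name mine; the real update-rc.d path additionally runs a '-n' dry run and a 'defaults' pass, as shown above):

    def enable_command(tool, name, enable, runlevel='default', unit=None):
        """Build the boot-enablement command line for the detected tool."""
        if tool.endswith('rc-update'):             # OpenRC: add/delete, verb first
            verb = 'add' if enable else 'delete'
            return [tool, verb, name, runlevel]
        if tool.endswith('systemctl'):             # systemd: enable/disable, verb first
            verb = 'enable' if enable else 'disable'
            return [tool, verb, unit or name]
        if tool.endswith('chkconfig'):             # chkconfig: name first, on/off
            return [tool, name, 'on' if enable else 'off']
        # update-rc.d: name first, enable/disable
        return [tool, name, 'enable' if enable else 'disable']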
- if svc_cmd != '': - # upstart or systemd - rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % (svc_cmd, 'stop', arguments), daemonize=True) - else: - # SysV - rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % ('stop', self.name, arguments), daemonize=True) - - if self.sleep: - time.sleep(self.sleep) - - if svc_cmd != '': - # upstart or systemd - rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % (svc_cmd, 'start', arguments), daemonize=True) - else: - # SysV - rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % ('start', self.name, arguments), daemonize=True) - - # merge return information - if rc1 != 0 and rc2 == 0: - rc_state = rc2 - stdout = stdout2 - stderr = stderr2 - else: - rc_state = rc1 + rc2 - stdout = stdout1 + stdout2 - stderr = stderr1 + stderr2 - - return(rc_state, stdout, stderr) - -# =========================================== -# Subclass: FreeBSD - -class FreeBsdService(Service): - """ - This is the FreeBSD Service manipulation class - it uses the /etc/rc.conf - file for controlling services started at boot and the 'service' binary to - check status and perform direct service manipulation. - """ - - platform = 'FreeBSD' - distribution = None - - def get_service_tools(self): - self.svc_cmd = self.module.get_bin_path('service', True) - - if not self.svc_cmd: - self.module.fail_json(msg='unable to find service binary') - - def get_service_status(self): - rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'onestatus', self.arguments)) - if rc == 1: - self.running = False - elif rc == 0: - self.running = True - - def service_enable(self): - if self.enable: - self.rcconf_value = "YES" - else: - self.rcconf_value = "NO" - - rcfiles = [ '/etc/rc.conf','/etc/rc.conf.local', '/usr/local/etc/rc.conf' ] - for rcfile in rcfiles: - if os.path.isfile(rcfile): - self.rcconf_file = rcfile - - rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments)) - cmd = "%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments) - rcvars = shlex.split(stdout, comments=True) - - if not rcvars: - self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr) - - # In rare cases, i.e. sendmail, rcvar can return several key=value pairs - # Usually there is just one, however. In other rare cases, i.e. uwsgi, - # rcvar can return extra uncommented data that is not at all related to - # the rcvar. We will just take the first key=value pair we come across - # and hope for the best. - for rcvar in rcvars: - if '=' in rcvar: - self.rcconf_key = rcvar.split('=')[0] - break - - if self.rcconf_key is None: - self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr) - - return self.service_enable_rcconf() - - def service_control(self): - - if self.action is "start": - self.action = "onestart" - if self.action is "stop": - self.action = "onestop" - if self.action is "reload": - self.action = "onereload" - - return self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments)) - -# =========================================== -# Subclass: OpenBSD - -class OpenBsdService(Service): - """ - This is the OpenBSD Service manipulation class - it uses /etc/rc.d for - service control. Enabling a service is currently not supported because the - _flags variable is not boolean, you should supply a rc.conf.local - file in some other way. 
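FreeBsdService.service_enable() has to discover which rc.conf variable governs the service by parsing 'service <name> rcvar' output; as the comments above note, some services (sendmail) print several key=value pairs and others (uwsgi) print unrelated uncommented lines, so the first token containing '=' wins. Standalone, that is roughly (name mine, no error handling):

    import shlex
    import subprocess

    def find_rcvar(name):
        """Return the rc.conf variable for a FreeBSD service, or None."""
        out = subprocess.run(["service", name, "rcvar"],
                             capture_output=True, text=True).stdout
        # shlex with comments=True drops the '# comment' lines rcvar prints
        for token in shlex.split(out, comments=True):
            if '=' in token:
                return token.split('=')[0]   # first key=value pair wins
        return None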
- """ - - platform = 'OpenBSD' - distribution = None - - def get_service_tools(self): - rcdir = '/etc/rc.d' - - rc_script = "%s/%s" % (rcdir, self.name) - if os.path.isfile(rc_script): - self.svc_cmd = rc_script - - if not self.svc_cmd: - self.module.fail_json(msg='unable to find rc.d script') - - def get_service_status(self): - rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check')) - if rc == 1: - self.running = False - elif rc == 0: - self.running = True - - def service_control(self): - return self.execute_command("%s %s" % (self.svc_cmd, self.action)) - -# =========================================== -# Subclass: NetBSD - -class NetBsdService(Service): - """ - This is the NetBSD Service manipulation class - it uses the /etc/rc.conf - file for controlling services started at boot, check status and perform - direct service manipulation. Init scripts in /etc/rcd are used for - controlling services (start/stop) as well as for controlling the current - state. - """ - - platform = 'NetBSD' - distribution = None - - def get_service_tools(self): - initpaths = [ '/etc/rc.d' ] # better: $rc_directories - how to get in here? Run: sh -c '. /etc/rc.conf ; echo $rc_directories' - - for initdir in initpaths: - initscript = "%s/%s" % (initdir,self.name) - if os.path.isfile(initscript): - self.svc_initscript = initscript - - if not self.svc_initscript: - self.module.fail_json(msg='unable to find rc.d script') - - def service_enable(self): - if self.enable: - self.rcconf_value = "YES" - else: - self.rcconf_value = "NO" - - rcfiles = [ '/etc/rc.conf' ] # Overkill? - for rcfile in rcfiles: - if os.path.isfile(rcfile): - self.rcconf_file = rcfile - - self.rcconf_key = "%s" % string.replace(self.name,"-","_") - - return self.service_enable_rcconf() - - def get_service_status(self): - self.svc_cmd = "%s" % self.svc_initscript - rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'onestatus')) - if rc == 1: - self.running = False - elif rc == 0: - self.running = True - - def service_control(self): - if self.action is "start": - self.action = "onestart" - if self.action is "stop": - self.action = "onestop" - - self.svc_cmd = "%s" % self.svc_initscript - return self.execute_command("%s %s" % (self.svc_cmd, self.action), daemonize=True) - -# =========================================== -# Subclass: SunOS -class SunOSService(Service): - """ - This is the SunOS Service manipulation class - it uses the svcadm - command for controlling services, and svcs command for checking status. - It also tries to be smart about taking the service out of maintenance - state if necessary. - """ - platform = 'SunOS' - distribution = None - - def get_service_tools(self): - self.svcs_cmd = self.module.get_bin_path('svcs', True) - - if not self.svcs_cmd: - self.module.fail_json(msg='unable to find svcs binary') - - self.svcadm_cmd = self.module.get_bin_path('svcadm', True) - - if not self.svcadm_cmd: - self.module.fail_json(msg='unable to find svcadm binary') - - def get_service_status(self): - status = self.get_sunos_svcs_status() - # Only 'online' is considered properly running. Everything else is off - # or has some sort of problem. 
- if status == 'online': - self.running = True - else: - self.running = False - - def get_sunos_svcs_status(self): - rc, stdout, stderr = self.execute_command("%s %s" % (self.svcs_cmd, self.name)) - if rc == 1: - if stderr: - self.module.fail_json(msg=stderr) - else: - self.module.fail_json(msg=stdout) - - lines = stdout.rstrip("\n").split("\n") - status = lines[-1].split(" ")[0] - # status is one of: online, offline, degraded, disabled, maintenance, uninitialized - # see man svcs(1) - return status - - def service_enable(self): - # Get current service enablement status - rc, stdout, stderr = self.execute_command("%s -l %s" % (self.svcs_cmd, self.name)) - - if rc != 0: - if stderr: - self.module.fail_json(msg=stderr) - else: - self.module.fail_json(msg=stdout) - - enabled = False - temporary = False - - # look for enabled line, which could be one of: - # enabled true (temporary) - # enabled false (temporary) - # enabled true - # enabled false - for line in stdout.split("\n"): - if line.startswith("enabled"): - if "true" in line: - enabled = True - if "temporary" in line: - temporary = True - - startup_enabled = (enabled and not temporary) or (not enabled and temporary) - - if self.enable and startup_enabled: - return - elif (not self.enable) and (not startup_enabled): - return - - # Mark service as started or stopped (this will have the side effect of - # actually stopping or starting the service) - if self.enable: - subcmd = "enable -rs" - else: - subcmd = "disable -s" - - rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name)) - - if rc != 0: - if stderr: - self.module.fail_json(msg=stderr) - else: - self.module.fail_json(msg=stdout) - - self.changed = True - - - def service_control(self): - status = self.get_sunos_svcs_status() - - # if starting or reloading, clear maintenance states - if self.action in ['start', 'reload', 'restart'] and status in ['maintenance', 'degraded']: - rc, stdout, stderr = self.execute_command("%s clear %s" % (self.svcadm_cmd, self.name)) - if rc != 0: - return rc, stdout, stderr - status = self.get_sunos_svcs_status() - - if status in ['maintenance', 'degraded']: - self.module.fail_json(msg="Failed to bring service out of %s status." % status) - - if self.action == 'start': - subcmd = "enable -rst" - elif self.action == 'stop': - subcmd = "disable -st" - elif self.action == 'reload': - subcmd = "refresh" - elif self.action == 'restart' and status == 'online': - subcmd = "restart" - elif self.action == 'restart' and status != 'online': - subcmd = "enable -rst" - - return self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name)) - -# =========================================== -# Subclass: AIX - -class AIX(Service): - """ - This is the AIX Service (SRC) manipulation class - it uses lssrc, startsrc, stopsrc - and refresh for service control. Enabling a service is currently not supported. 
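SunOSService.service_enable() above hides a subtlety: 'svcs -l' reports current enablement plus a temporary flag, and a temporary state means the persistent, boot-time setting is the opposite of what is currently in effect. The XOR-style expression can therefore be read as (helper mine):

    def startup_enabled(enabled, temporary):
        """Persistent (boot-time) enablement from `svcs -l` flags.

        enabled true              -> enabled at boot
        enabled true  (temporary) -> disabled at boot, enabled just for now
        enabled false (temporary) -> enabled at boot, disabled just for now
        enabled false             -> disabled at boot
        """
        # equivalent to (enabled and not temporary) or (not enabled and temporary)
        return enabled != temporary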
- Would require to add an entry in the /etc/inittab file (mkitab, chitab and rmitab - commands) - """ - - platform = 'AIX' - distribution = None - - def get_service_tools(self): - self.lssrc_cmd = self.module.get_bin_path('lssrc', True) - - if not self.lssrc_cmd: - self.module.fail_json(msg='unable to find lssrc binary') - - self.startsrc_cmd = self.module.get_bin_path('startsrc', True) - - if not self.startsrc_cmd: - self.module.fail_json(msg='unable to find startsrc binary') - - self.stopsrc_cmd = self.module.get_bin_path('stopsrc', True) - - if not self.stopsrc_cmd: - self.module.fail_json(msg='unable to find stopsrc binary') - - self.refresh_cmd = self.module.get_bin_path('refresh', True) - - if not self.refresh_cmd: - self.module.fail_json(msg='unable to find refresh binary') - - - def get_service_status(self): - status = self.get_aix_src_status() - # Only 'active' is considered properly running. Everything else is off - # or has some sort of problem. - if status == 'active': - self.running = True - else: - self.running = False - - def get_aix_src_status(self): - rc, stdout, stderr = self.execute_command("%s -s %s" % (self.lssrc_cmd, self.name)) - if rc == 1: - if stderr: - self.module.fail_json(msg=stderr) - else: - self.module.fail_json(msg=stdout) - - lines = stdout.rstrip("\n").split("\n") - status = lines[-1].split(" ")[-1] - # status is one of: active, inoperative - return status - - def service_control(self): - if self.action == 'start': - srccmd = self.startsrc_cmd - elif self.action == 'stop': - srccmd = self.stopsrc_cmd - elif self.action == 'reload': - srccmd = self.refresh_cmd - elif self.action == 'restart': - self.execute_command("%s -s %s" % (self.stopsrc_cmd, self.name)) - srccmd = self.startsrc_cmd - - if self.arguments and self.action == 'start': - return self.execute_command("%s -a \"%s\" -s %s" % (srccmd, self.arguments, self.name)) - else: - return self.execute_command("%s -s %s" % (srccmd, self.name)) - - -# =========================================== -# Main control flow - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - state = dict(choices=['running', 'started', 'stopped', 'restarted', 'reloaded']), - sleep = dict(required=False, type='int', default=None), - pattern = dict(required=False, default=None), - enabled = dict(type='bool'), - runlevel = dict(required=False, default='default'), - arguments = dict(aliases=['args'], default=''), - ), - supports_check_mode=True - ) - if module.params['state'] is None and module.params['enabled'] is None: - module.fail_json(msg="Neither 'state' nor 'enabled' set") - - service = Service(module) - - if service.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'Service instantiated - platform %s' % service.platform) - if service.distribution: - syslog.syslog(syslog.LOG_NOTICE, 'Service instantiated - distribution %s' % service.distribution) - - rc = 0 - out = '' - err = '' - result = {} - result['name'] = service.name - - # Find service management tools - service.get_service_tools() - - # Enable/disable service startup at boot if requested - if service.module.params['enabled'] is not None: - # FIXME: ideally this should detect if we need to toggle the enablement state, though - # it's unlikely the changed handler would need to fire in this case so it's a minor thing. - service.service_enable() - result['enabled'] = service.enable - - if module.params['state'] is None: - # Not changing the running state, so bail out now. 
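AIX status checking follows the same parse-the-last-line pattern as SunOS: 'lssrc -s <name>' prints a header plus one status row, and only an 'active' status counts as running. A hedged sketch, with subprocess in place of the module's execute_command():

    import subprocess

    def aix_src_status(name):
        """Return the SRC status word for a subsystem ('active', 'inoperative', ...)."""
        proc = subprocess.run(["lssrc", "-s", name], capture_output=True, text=True)
        if proc.returncode != 0:
            raise RuntimeError(proc.stderr or proc.stdout)
        # last line, last whitespace-separated column holds the status word
        return proc.stdout.rstrip("\n").splitlines()[-1].split(" ")[-1]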
- result['changed'] = service.changed - module.exit_json(**result) - - result['state'] = service.state - - # Collect service status - if service.pattern: - service.check_ps() - else: - service.get_service_status() - - # Calculate if request will change service state - service.check_service_changed() - - # Modify service state if necessary - (rc, out, err) = service.modify_service_state() - - if rc != 0: - if err and "is already" in err: - # upstart got confused, one such possibility is MySQL on Ubuntu 12.04 - # where status may report it has no start/stop links and we could - # not get accurate status - pass - else: - if err: - module.fail_json(msg=err) - else: - module.fail_json(msg=out) - - result['changed'] = service.changed | service.svc_change - if service.module.params['enabled'] is not None: - result['enabled'] = service.module.params['enabled'] - - if not service.module.params['state']: - status = service.get_service_status() - if status is None: - result['state'] = 'absent' - elif status is False: - result['state'] = 'stopped' - else: - result['state'] = 'started' - else: - # as we may have just bounced the service the service command may not - # report accurate state at this moment so just show what we ran - if service.module.params['state'] in ['started','restarted','running','reloaded']: - result['state'] = 'started' - else: - result['state'] = 'stopped' - - module.exit_json(**result) - -from ansible.module_utils.basic import * -main() diff --git a/library/system/setup b/library/system/setup deleted file mode 100644 index 486304230bf..00000000000 --- a/library/system/setup +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: setup -version_added: historical -short_description: Gathers facts about remote hosts -options: - filter: - version_added: "1.1" - description: - - if supplied, only return facts that match this shell-style (fnmatch) wildcard. - required: false - default: '*' - fact_path: - version_added: "1.3" - description: - - path used for local ansible facts (*.fact) - files in this dir - will be run (if executable) and their results be added to ansible_local facts - if a file is not executable it is read. - File/results format can be json or ini-format - required: false - default: '/etc/ansible/facts.d' -description: - - This module is automatically called by playbooks to gather useful - variables about remote hosts that can be used in playbooks. It can also be - executed directly by C(/usr/bin/ansible) to check what variables are - available to a host. Ansible provides many I(facts) about the system, - automatically. -notes: - - More ansible facts will be added with successive releases. If I(facter) or - I(ohai) are installed, variables from these programs will also be snapshotted - into the JSON file for usage in templating. 
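The facter/ohai note here corresponds to a small merge step in run_setup() further down: if the binary exists, run it, parse its JSON, and copy every key in under a facter_ or ohai_ prefix, silently skipping the merge when the JSON does not parse. In miniature (shutil.which() in place of module.get_bin_path()):

    import json
    import shutil
    import subprocess

    def facter_facts():
        """Collect facter output as facter_-prefixed facts; {} if unavailable."""
        path = shutil.which("facter")
        if path is None:
            return {}
        out = subprocess.run([path, "--puppet", "--json"],
                             capture_output=True, text=True).stdout
        try:
            data = json.loads(out)
        except ValueError:        # no ruby-json / unparseable output: skip facter
            return {}
        return {"facter_%s" % k: v for k, v in data.items()}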
These variables are prefixed - with C(facter_) and C(ohai_) so it's easy to tell their source. All variables are - bubbled up to the caller. Using the ansible facts and choosing to not - install I(facter) and I(ohai) means you can avoid Ruby-dependencies on your - remote systems. (See also M(facter) and M(ohai).) - - The filter option filters only the first level subkey below ansible_facts. - - If the target host is Windows, you will not currently have the ability to use - C(fact_path) or C(filter) as this is provided by a simpler implementation of the module. - Different facts are returned for Windows hosts. -author: Michael DeHaan -''' - -EXAMPLES = """ -# Display facts from all hosts and store them indexed by I(hostname) at C(/tmp/facts). -ansible all -m setup --tree /tmp/facts - -# Display only facts regarding memory found by ansible on all hosts and output them. -ansible all -m setup -a 'filter=ansible_*_mb' - -# Display only facts returned by facter. -ansible all -m setup -a 'filter=facter_*' - -# Display only facts about certain interfaces. -ansible all -m setup -a 'filter=ansible_eth[0-2]' -""" - - -def run_setup(module): - - setup_options = dict(module_setup=True) - facts = ansible_facts(module) - - for (k, v) in facts.items(): - setup_options["ansible_%s" % k.replace('-', '_')] = v - - # Look for the path to the facter and ohai binary and set - # the variable to that path. - facter_path = module.get_bin_path('facter') - ohai_path = module.get_bin_path('ohai') - - # if facter is installed, and we can use --json because - # ruby-json is ALSO installed, include facter data in the JSON - if facter_path is not None: - rc, out, err = module.run_command(facter_path + " --puppet --json") - facter = True - try: - facter_ds = json.loads(out) - except: - facter = False - if facter: - for (k,v) in facter_ds.items(): - setup_options["facter_%s" % k] = v - - # ditto for ohai - if ohai_path is not None: - rc, out, err = module.run_command(ohai_path) - ohai = True - try: - ohai_ds = json.loads(out) - except: - ohai = False - if ohai: - for (k,v) in ohai_ds.items(): - k2 = "ohai_%s" % k.replace('-', '_') - setup_options[k2] = v - - setup_result = { 'ansible_facts': {} } - - for (k,v) in setup_options.items(): - if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']): - setup_result['ansible_facts'][k] = v - - # hack to keep --verbose from showing all the setup module results - setup_result['verbose_override'] = True - - return setup_result - -def main(): - global module - module = AnsibleModule( - argument_spec = dict( - filter=dict(default="*", required=False), - fact_path=dict(default='/etc/ansible/facts.d', required=False), - ), - supports_check_mode = True, - ) - data = run_setup(module) - module.exit_json(**data) - -# import module snippets - -from ansible.module_utils.basic import * - -from ansible.module_utils.facts import * - -main() diff --git a/library/system/sysctl b/library/system/sysctl deleted file mode 100644 index acf6395f071..00000000000 --- a/library/system/sysctl +++ /dev/null @@ -1,334 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, David "DaviXX" CHANIAL -# (c) 2014, James Tanner -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: sysctl -short_description: Manage entries in sysctl.conf. -description: - - This module manipulates sysctl entries and optionally performs a C(/sbin/sysctl -p) after changing them. -version_added: "1.0" -options: - name: - description: - - The dot-separated path (aka I(key)) specifying the sysctl variable. - required: true - default: null - aliases: [ 'key' ] - value: - description: - - Desired value of the sysctl key. - required: false - default: null - aliases: [ 'val' ] - state: - description: - - Whether the entry should be present or absent in the sysctl file. - choices: [ "present", "absent" ] - default: present - ignoreerrors: - description: - - Use this option to ignore errors about unknown keys. - choices: [ "yes", "no" ] - default: no - reload: - description: - - If C(yes), performs a I(/sbin/sysctl -p) if the C(sysctl_file) is - updated. If C(no), does not reload I(sysctl) even if the - C(sysctl_file) is updated. - choices: [ "yes", "no" ] - default: "yes" - sysctl_file: - description: - - Specifies the absolute path to C(sysctl.conf), if not C(/etc/sysctl.conf). - required: false - default: /etc/sysctl.conf - sysctl_set: - description: - - Verify token value with the sysctl command and set with -w if necessary - choices: [ "yes", "no" ] - required: false - version_added: 1.5 - default: False -notes: [] -requirements: [] -author: David "DaviXX" CHANIAL -''' - -EXAMPLES = ''' -# Set vm.swappiness to 5 in /etc/sysctl.conf -- sysctl: name=vm.swappiness value=5 state=present - -# Remove kernel.panic entry from /etc/sysctl.conf -- sysctl: name=kernel.panic state=absent sysctl_file=/etc/sysctl.conf - -# Set kernel.panic to 3 in /tmp/test_sysctl.conf -- sysctl: name=kernel.panic value=3 sysctl_file=/tmp/test_sysctl.conf reload=no - -# Set IP forwarding on in /proc and do not reload the sysctl file -- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes - -# Set IP forwarding on in /proc and in the sysctl file and reload if necessary -- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes -''' - -# ============================================================== - -import os -import tempfile -import re - -class SysctlModule(object): - - def __init__(self, module): - self.module = module - self.args = self.module.params - - self.sysctl_cmd = self.module.get_bin_path('sysctl', required=True) - self.sysctl_file = self.args['sysctl_file'] - - self.proc_value = None # current token value in proc fs - self.file_value = None # current token value in file - self.file_lines = [] # all lines in the file - self.file_values = {} # dict of token values - - self.changed = False # will change occur - self.set_proc = False # does sysctl need to set value - self.write_file = False # does the sysctl file need to be rewritten - - self.process() - - # ============================================================== - # LOGIC - # ============================================================== - - def process(self): - - # Whitespace is bad - self.args['name'] = self.args['name'].strip() - self.args['value'] = self._parse_value(self.args['value']) - - thisname = self.args['name'] - - # get the 
current proc fs value - self.proc_value = self.get_token_curr_value(thisname) - - # get the current sysctl file value - self.read_sysctl_file() - if thisname not in self.file_values: - self.file_values[thisname] = None - - # update file contents with desired token/value - self.fix_lines() - - # what do we need to do now? - if self.file_values[thisname] is None and self.args['state'] == "present": - self.changed = True - self.write_file = True - elif self.file_values[thisname] is None and self.args['state'] == "absent": - self.changed = False - elif self.file_values[thisname] != self.args['value']: - self.changed = True - self.write_file = True - - # use the sysctl command or not? - if self.args['sysctl_set']: - if self.proc_value is None: - self.changed = True - elif not self._values_is_equal(self.proc_value, self.args['value']): - self.changed = True - self.set_proc = True - - # Do the work - if not self.module.check_mode: - if self.write_file: - self.write_sysctl() - if self.write_file and self.args['reload']: - self.reload_sysctl() - if self.set_proc: - self.set_token_value(self.args['name'], self.args['value']) - - def _values_is_equal(self, a, b): - """Expects two string values. It will split the string by whitespace - and compare each value. It will return True if both lists are the same, - contain the same elements in the same order.""" - if a is None or b is None: - return False - - a = a.split() - b = b.split() - - if len(a) != len(b): - return False - - return len([i for i, j in zip(a, b) if i == j]) == len(a) - - def _parse_value(self, value): - if value is None: - return '' - elif value.lower() in BOOLEANS_TRUE: - return '1' - elif value.lower() in BOOLEANS_FALSE: - return '0' - else: - return value.strip() - - # ============================================================== - # SYSCTL COMMAND MANAGEMENT - # ============================================================== - - # Use the sysctl command to find the current value - def get_token_curr_value(self, token): - thiscmd = "%s -e -n %s" % (self.sysctl_cmd, token) - rc,out,err = self.module.run_command(thiscmd) - if rc != 0: - return None - else: - return out - - # Use the sysctl command to set the current value - def set_token_value(self, token, value): - if len(value.split()) > 0: - value = '"' + value + '"' - thiscmd = "%s -w %s=%s" % (self.sysctl_cmd, token, value) - rc,out,err = self.module.run_command(thiscmd) - if rc != 0: - self.module.fail_json(msg='setting %s failed: %s' % (token, out + err)) - else: - return rc - - # Run sysctl -p - def reload_sysctl(self): - # do it - if get_platform().lower() == 'freebsd': - # freebsd doesn't support -p, so reload the sysctl service - rc,out,err = self.module.run_command('/etc/rc.d/sysctl reload') - else: - # system supports reloading via the -p flag to sysctl, so we'll use that - sysctl_args = [self.sysctl_cmd, '-p', self.sysctl_file] - if self.args['ignoreerrors']: - sysctl_args.insert(1, '-e') - - rc,out,err = self.module.run_command(sysctl_args) - - if rc != 0: - self.module.fail_json(msg="Failed to reload sysctl: %s" % str(out) + str(err)) - - # ============================================================== - # SYSCTL FILE MANAGEMENT - # ============================================================== - - # Get the token value from the sysctl file - def read_sysctl_file(self): - - lines = [] - if os.path.isfile(self.sysctl_file): - try: - f = open(self.sysctl_file, "r") - lines = f.readlines() - f.close() - except IOError, e: - self.module.fail_json(msg="Failed to open %s: 
%s" % (self.sysctl_file, str(e))) - - for line in lines: - line = line.strip() - self.file_lines.append(line) - - # don't split empty lines or comments - if not line or line.startswith("#"): - continue - - k, v = line.split('=',1) - k = k.strip() - v = v.strip() - self.file_values[k] = v.strip() - - # Fix the value in the sysctl file content - def fix_lines(self): - checked = [] - self.fixed_lines = [] - for line in self.file_lines: - if not line.strip() or line.strip().startswith("#"): - self.fixed_lines.append(line) - continue - tmpline = line.strip() - k, v = line.split('=',1) - k = k.strip() - v = v.strip() - if k not in checked: - checked.append(k) - if k == self.args['name']: - if self.args['state'] == "present": - new_line = "%s = %s\n" % (k, self.args['value']) - self.fixed_lines.append(new_line) - else: - new_line = "%s = %s\n" % (k, v) - self.fixed_lines.append(new_line) - - if self.args['name'] not in checked and self.args['state'] == "present": - new_line = "%s=%s\n" % (self.args['name'], self.args['value']) - self.fixed_lines.append(new_line) - - # Completely rewrite the sysctl file - def write_sysctl(self): - # open a tmp file - fd, tmp_path = tempfile.mkstemp('.conf', '.ansible_m_sysctl_', os.path.dirname(self.sysctl_file)) - f = open(tmp_path,"w") - try: - for l in self.fixed_lines: - f.write(l.strip() + "\n") - except IOError, e: - self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e))) - f.flush() - f.close() - - # replace the real one - self.module.atomic_move(tmp_path, self.sysctl_file) - - -# ============================================================== -# main - -def main(): - - # defining module - module = AnsibleModule( - argument_spec = dict( - name = dict(aliases=['key'], required=True), - value = dict(aliases=['val'], required=False), - state = dict(default='present', choices=['present', 'absent']), - reload = dict(default=True, type='bool'), - sysctl_set = dict(default=False, type='bool'), - ignoreerrors = dict(default=False, type='bool'), - sysctl_file = dict(default='/etc/sysctl.conf') - ), - supports_check_mode=True - ) - - result = SysctlModule(module) - - module.exit_json(changed=result.changed) - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/ufw b/library/system/ufw deleted file mode 100644 index e917a3bc749..00000000000 --- a/library/system/ufw +++ /dev/null @@ -1,269 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Ahti Kitsik -# (c) 2014, Jarno Keskikangas -# (c) 2013, Aleksey Ovcharenko -# (c) 2013, James Martin -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ufw -short_description: Manage firewall with UFW -description: - - Manage firewall with UFW. -version_added: 1.6 -author: Aleksey Ovcharenko, Jarno Keskikangas, Ahti Kitsik -notes: - - See C(man ufw) for more examples. 
-requirements: - - C(ufw) package -options: - state: - description: - - C(enabled) reloads firewall and enables firewall on boot. - - C(disabled) unloads firewall and disables firewall on boot. - - C(reloaded) reloads firewall. - - C(reset) disables and resets firewall to installation defaults. - required: false - choices: ['enabled', 'disabled', 'reloaded', 'reset'] - policy: - description: - - Change the default policy for incoming or outgoing traffic. - required: false - alias: default - choices: ['allow', 'deny', 'reject'] - direction: - description: - - Select direction for a rule or default policy command. - required: false - choices: ['in', 'out', 'incoming', 'outgoing'] - logging: - description: - - Toggles logging. Logged packets use the LOG_KERN syslog facility. - choices: ['on', 'off', 'low', 'medium', 'high', 'full'] - required: false - insert: - description: - - Insert the corresponding rule as rule number NUM - required: false - rule: - description: - - Add firewall rule - required: false - choices: ['allow', 'deny', 'reject', 'limit'] - log: - description: - - Log new connections matched to this rule - required: false - choices: ['yes', 'no'] - from_ip: - description: - - Source IP address. - required: false - aliases: ['from', 'src'] - default: 'any' - from_port: - description: - - Source port. - required: false - to_ip: - description: - - Destination IP address. - required: false - aliases: ['to', 'dest'] - default: 'any' - to_port: - description: - - Destination port. - required: false - aliases: ['port'] - proto: - description: - - TCP/IP protocol. - choices: ['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah'] - required: false - name: - description: - - Use profile located in C(/etc/ufw/applications.d) - required: false - aliases: ['app'] - delete: - description: - - Delete rule. - required: false - choices: ['yes', 'no'] - interface: - description: - - Specify interface for rule. - required: false - aliases: ['if'] -''' - -EXAMPLES = ''' -# Allow everything and enable UFW -ufw: state=enabled policy=allow - -# Set logging -ufw: logging=on - -# Sometimes it is desirable to let the sender know when traffic is -# being denied, rather than simply ignoring it. In these cases, use -# reject instead of deny. In addition, log rejected connections: -ufw: rule=reject port=auth log=yes - -# ufw supports connection rate limiting, which is useful for protecting -# against brute-force login attacks. ufw will deny connections if an IP -# address has attempted to initiate 6 or more connections in the last -# 30 seconds. See http://www.debian-administration.org/articles/187 -# for details. Typical usage is: -ufw: rule=limit port=ssh proto=tcp - -# Allow OpenSSH -ufw: rule=allow name=OpenSSH - -# Delete OpenSSH rule -ufw: rule=allow name=OpenSSH delete=yes - -# Deny all access to port 53: -ufw: rule=deny port=53 - -# Allow all access to tcp port 80: -ufw: rule=allow port=80 proto=tcp - -# Allow all access from RFC1918 networks to this host: -ufw: rule=allow src={{ item }} -with_items: -- 10.0.0.0/8 -- 172.16.0.0/12 -- 192.168.0.0/16 - -# Deny access to udp port 514 from host 1.2.3.4: -ufw: rule=deny proto=udp src=1.2.3.4 port=514 - -# Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469 -ufw: rule=allow interface=eth0 direction=in proto=udp src=1.2.3.5 from_port=5469 dest=1.2.3.4 to_port=5469 - -# Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host. -# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work. 
-ufw: rule=deny proto=tcp src=2001:db8::/32 port=25 -''' - -from operator import itemgetter - - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(default=None, choices=['enabled', 'disabled', 'reloaded', 'reset']), - default = dict(default=None, aliases=['policy'], choices=['allow', 'deny', 'reject']), - logging = dict(default=None, choices=['on', 'off', 'low', 'medium', 'high', 'full']), - direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing']), - delete = dict(default=False, type='bool'), - insert = dict(default=None), - rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']), - interface = dict(default=None, aliases=['if']), - log = dict(default=False, type='bool'), - from_ip = dict(default='any', aliases=['src', 'from']), - from_port = dict(default=None), - to_ip = dict(default='any', aliases=['dest', 'to']), - to_port = dict(default=None, aliases=['port']), - proto = dict(default=None, aliases=['protocol'], choices=['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']), - app = dict(default=None, aliases=['name']) - ), - supports_check_mode = True, - mutually_exclusive = [['app', 'proto', 'logging']] - ) - - cmds = [] - - def execute(cmd): - cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd))) - - cmds.append(cmd) - (rc, out, err) = module.run_command(cmd) - - if rc != 0: - module.fail_json(msg=err or out) - - params = module.params - - # Ensure at least one of the command arguments are given - command_keys = ['state', 'default', 'rule', 'logging'] - commands = dict((key, params[key]) for key in command_keys if params[key]) - - if len(commands) < 1: - module.fail_json(msg="Not any of the command arguments %s given" % commands) - - if('interface' in params and 'direction' not in params): - module.fail_json(msg="Direction must be specified when creating a rule on an interface") - - # Ensure ufw is available - ufw_bin = module.get_bin_path('ufw', True) - - # Save the pre state and rules in order to recognize changes - (_, pre_state, _) = module.run_command(ufw_bin + ' status verbose') - (_, pre_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user*.rules") - - # Execute commands - for (command, value) in commands.iteritems(): - cmd = [[ufw_bin], [module.check_mode, '--dry-run']] - - if command == 'state': - states = { 'enabled': 'enable', 'disabled': 'disable', - 'reloaded': 'reload', 'reset': 'reset' } - execute(cmd + [['-f'], [states[value]]]) - - elif command == 'logging': - execute(cmd + [[command], [value]]) - - elif command == 'default': - execute(cmd + [[command], [value], [params['direction']]]) - - elif command == 'rule': - # Rules are constructed according to the long format - # - # ufw [--dry-run] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \ - # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \ - # [proto protocol] [app application] - cmd.append([module.boolean(params['delete']), 'delete']) - cmd.append([params['insert'], "insert %s" % params['insert']]) - cmd.append([value]) - cmd.append([module.boolean(params['log']), 'log']) - - for (key, template) in [('direction', "%s" ), ('interface', "on %s" ), - ('from_ip', "from %s" ), ('from_port', "port %s" ), - ('to_ip', "to %s" ), ('to_port', "port %s" ), - ('proto', "proto %s"), ('app', "app '%s'")]: - - value = params[key] - cmd.append([value, template % (value)]) - - execute(cmd) - - # Get the new state - (_, post_state, _) = module.run_command(ufw_bin + ' status verbose') - (_, post_rules, _) = 
module.run_command("grep '^### tuple' /lib/ufw/user*.rules") - changed = (pre_state != post_state) or (pre_rules != post_rules) - - return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip()) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/system/user b/library/system/user deleted file mode 100644 index 551384a7a67..00000000000 --- a/library/system/user +++ /dev/null @@ -1,1584 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Stephen Fromm -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: user -author: Stephen Fromm -version_added: "0.2" -short_description: Manage user accounts -requirements: [ useradd, userdel, usermod ] -description: - - Manage user accounts and user attributes. -options: - name: - required: true - aliases: [ "user" ] - description: - - Name of the user to create, remove or modify. - comment: - required: false - description: - - Optionally sets the description (aka I(GECOS)) of user account. - uid: - required: false - description: - - Optionally sets the I(UID) of the user. - non_unique: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - Optionally when used with the -u option, this option allows to - change the user ID to a non-unique value. - version_added: "1.1" - group: - required: false - description: - - Optionally sets the user's primary group (takes a group name). - groups: - required: false - description: - - Puts the user in this comma-delimited list of groups. When set to - the empty string ('groups='), the user is removed from all groups - except the primary group. - append: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - If C(yes), will only add groups, not set them to just the list - in I(groups). - shell: - required: false - description: - - Optionally set the user's shell. - home: - required: false - description: - - Optionally set the user's home directory. - password: - required: false - description: - - Optionally set the user's password to this crypted value. See - the user example in the github examples directory for what this looks - like in a playbook. The `FAQ `_ - contains details on various ways to generate these password values. - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the account should exist. When C(absent), removes - the user account. - createhome: - required: false - default: "yes" - choices: [ "yes", "no" ] - description: - - Unless set to C(no), a home directory will be made for the user - when the account is created or if the home directory does not - exist. - move_home: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - If set to C(yes) when used with C(home=), attempt to move the - user's home directory to the specified directory if it isn't there - already. 
- system: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - When creating an account, setting this to C(yes) makes the user a - system account. This setting cannot be changed on existing users. - force: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - When used with C(state=absent), behavior is as with - C(userdel --force). - login_class: - required: false - description: - - Optionally sets the user's login class for FreeBSD, OpenBSD and NetBSD systems. - remove: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - When used with C(state=absent), behavior is as with - C(userdel --remove). - generate_ssh_key: - required: false - default: "no" - choices: [ "yes", "no" ] - version_added: "0.9" - description: - - Whether to generate a SSH key for the user in question. - This will B(not) overwrite an existing SSH key. - ssh_key_bits: - required: false - default: 2048 - version_added: "0.9" - description: - - Optionally specify number of bits in SSH key to create. - ssh_key_type: - required: false - default: rsa - version_added: "0.9" - description: - - Optionally specify the type of SSH key to generate. - Available SSH key types will depend on implementation - present on target host. - ssh_key_file: - required: false - default: $HOME/.ssh/id_rsa - version_added: "0.9" - description: - - Optionally specify the SSH key filename. - ssh_key_comment: - required: false - default: ansible-generated - version_added: "0.9" - description: - - Optionally define the comment for the SSH key. - ssh_key_passphrase: - required: false - version_added: "0.9" - description: - - Set a passphrase for the SSH key. If no - passphrase is provided, the SSH key will default to - having no passphrase. - update_password: - required: false - default: always - choices: ['always', 'on_create'] - version_added: "1.3" - description: - - C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users. -''' - -EXAMPLES = ''' -# Add the user 'johnd' with a specific uid and a primary group of 'admin' -- user: name=johnd comment="John Doe" uid=1040 group=admin - -# Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups -- user: name=james shell=/bin/bash groups=admins,developers append=yes - -# Remove the user 'johnd' -- user: name=johnd state=absent remove=yes - -# Create a 2048-bit SSH key for user jsmith -- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 -''' - -import os -import pwd -import grp -import syslog -import platform - -try: - import spwd - HAVE_SPWD=True -except: - HAVE_SPWD=False - - -class User(object): - """ - This is a generic User manipulation class that is subclassed - based on platform. - - A subclass may wish to override the following action methods:- - - create_user() - - remove_user() - - modify_user() - - ssh_key_gen() - - ssh_key_fingerprint() - - user_exists() - - All subclasses MUST define platform and distribution (which may be None). 
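The docstring above describes the dispatch used throughout this file: User.__new__ hands off to load_platform_subclass(), which picks whichever subclass declares a platform (and, where set, distribution) matching the managed host. A simplified sketch of that pattern, omitting the distribution check:

    import platform

    class Base(object):
        platform = 'Generic'

        def __new__(cls, *args, **kwargs):
            # Return the subclass whose `platform` attribute matches the
            # running system, falling back to the generic implementation.
            system = platform.system()
            for subclass in cls.__subclasses__():
                if subclass.platform == system:
                    return super(Base, cls).__new__(subclass)
            return super(Base, cls).__new__(cls)

    class FreeBSDBase(Base):
        platform = 'FreeBSD'

    print(type(Base()).__name__)  # 'FreeBSDBase' on FreeBSD, 'Base' elsewhere

This is why the module's main() below simply constructs User(module) and still gets FreeBsdUser, OpenBSDUser, and so on, on those platforms.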
- """ - - platform = 'Generic' - distribution = None - SHADOWFILE = '/etc/shadow' - - def __new__(cls, *args, **kwargs): - return load_platform_subclass(User, args, kwargs) - - def __init__(self, module): - self.module = module - self.state = module.params['state'] - self.name = module.params['name'] - self.uid = module.params['uid'] - self.non_unique = module.params['non_unique'] - self.group = module.params['group'] - self.groups = module.params['groups'] - self.comment = module.params['comment'] - self.home = module.params['home'] - self.shell = module.params['shell'] - self.password = module.params['password'] - self.force = module.params['force'] - self.remove = module.params['remove'] - self.createhome = module.params['createhome'] - self.move_home = module.params['move_home'] - self.system = module.params['system'] - self.login_class = module.params['login_class'] - self.append = module.params['append'] - self.sshkeygen = module.params['generate_ssh_key'] - self.ssh_bits = module.params['ssh_key_bits'] - self.ssh_type = module.params['ssh_key_type'] - self.ssh_comment = module.params['ssh_key_comment'] - self.ssh_passphrase = module.params['ssh_key_passphrase'] - self.update_password = module.params['update_password'] - if module.params['ssh_key_file'] is not None: - self.ssh_file = module.params['ssh_key_file'] - else: - self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type) - - # select whether we dump additional debug info through syslog - self.syslogging = False - - def execute_command(self, cmd): - if self.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd)) - - return self.module.run_command(cmd) - - def remove_user_userdel(self): - cmd = [self.module.get_bin_path('userdel', True)] - if self.force: - cmd.append('-f') - if self.remove: - cmd.append('-r') - cmd.append(self.name) - - return self.execute_command(cmd) - - def create_user_useradd(self, command_name='useradd'): - cmd = [self.module.get_bin_path(command_name, True)] - - if self.uid is not None: - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - cmd.append('-g') - cmd.append(self.group) - elif self.group_exists(self.name): - # use the -N option (no user group) if a group already - # exists with the same name as the user to prevent - # errors from useradd trying to create a group when - # USERGROUPS_ENAB is set in /etc/login.defs. 
- cmd.append('-N') - - if self.groups is not None and len(self.groups): - groups = self.get_groups_set() - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.comment is not None: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None: - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None: - cmd.append('-s') - cmd.append(self.shell) - - if self.password is not None: - cmd.append('-p') - cmd.append(self.password) - - if self.createhome: - cmd.append('-m') - else: - cmd.append('-M') - - if self.system: - cmd.append('-r') - - cmd.append(self.name) - return self.execute_command(cmd) - - - def _check_usermod_append(self): - # check if this version of usermod can append groups - usermod_path = self.module.get_bin_path('usermod', True) - - # for some reason, usermod --help cannot be used by non root - # on RH/Fedora, due to lack of execute bit for others - if not os.access(usermod_path, os.X_OK): - return False - - cmd = [usermod_path] - cmd.append('--help') - rc, data1, data2 = self.execute_command(cmd) - helpout = data1 + data2 - - # check if --append exists - lines = helpout.split('\n') - for line in lines: - if line.strip().startswith('-a, --append'): - return True - - return False - - - - def modify_user_usermod(self): - cmd = [self.module.get_bin_path('usermod', True)] - info = self.user_info() - has_append = self._check_usermod_append() - - if self.uid is not None and info[2] != int(self.uid): - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - ginfo = self.group_info(self.group) - if info[3] != ginfo[2]: - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - current_groups = self.user_group_membership() - groups_need_mod = False - groups = [] - - if self.groups == '': - if current_groups and not self.append: - groups_need_mod = True - else: - groups = self.get_groups_set(remove_existing=False) - group_diff = set(current_groups).symmetric_difference(groups) - - if group_diff: - if self.append: - for g in groups: - if g in group_diff: - if has_append: - cmd.append('-a') - groups_need_mod = True - break - else: - groups_need_mod = True - - if groups_need_mod: - if self.append and not has_append: - cmd.append('-A') - cmd.append(','.join(group_diff)) - else: - cmd.append('-G') - cmd.append(','.join(groups)) - - - if self.comment is not None and info[4] != self.comment: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None and info[5] != self.home: - cmd.append('-d') - cmd.append(self.home) - if self.move_home: - cmd.append('-m') - - if self.shell is not None and info[6] != self.shell: - cmd.append('-s') - cmd.append(self.shell) - - if self.update_password == 'always' and self.password is not None and info[1] != self.password: - cmd.append('-p') - cmd.append(self.password) - - # skip if no changes to be made - if len(cmd) == 1: - return (None, '', '') - elif self.module.check_mode: - return (0, '', '') - - cmd.append(self.name) - return self.execute_command(cmd) - - def group_exists(self,group): - try: - if group.isdigit(): - if grp.getgrgid(int(group)): - return True - else: - if grp.getgrnam(group): - return True - except KeyError: - return False - - def group_info(self,group): - if not self.group_exists(group): - return False - if group.isdigit(): - return list(grp.getgrgid(group)) - else: - return list(grp.getgrnam(group)) - - def 
get_groups_set(self, remove_existing=True): - if self.groups is None: - return None - info = self.user_info() - groups = set(filter(None, self.groups.split(','))) - for g in set(groups): - if not self.group_exists(g): - self.module.fail_json(msg="Group %s does not exist" % (g)) - if info and remove_existing and self.group_info(g)[2] == info[3]: - groups.remove(g) - return groups - - def user_group_membership(self): - groups = [] - info = self.get_pwd_info() - for group in grp.getgrall(): - if self.name in group.gr_mem and not info[3] == group.gr_gid: - groups.append(group[0]) - return groups - - def user_exists(self): - try: - if pwd.getpwnam(self.name): - return True - except KeyError: - return False - - def get_pwd_info(self): - if not self.user_exists(): - return False - return list(pwd.getpwnam(self.name)) - - def user_info(self): - if not self.user_exists(): - return False - info = self.get_pwd_info() - if len(info[1]) == 1 or len(info[1]) == 0: - info[1] = self.user_password() - return info - - def user_password(self): - passwd = '' - if HAVE_SPWD: - try: - passwd = spwd.getspnam(self.name)[1] - except KeyError: - return passwd - if not self.user_exists(): - return passwd - else: - # Read shadow file for user's encrypted password string - if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK): - for line in open(self.SHADOWFILE).readlines(): - if line.startswith('%s:' % self.name): - passwd = line.split(':')[1] - return passwd - - def get_ssh_key_path(self): - info = self.user_info() - if os.path.isabs(self.ssh_file): - ssh_key_file = self.ssh_file - else: - ssh_key_file = os.path.join(info[5], self.ssh_file) - return ssh_key_file - - def ssh_key_gen(self): - info = self.user_info() - if not os.path.exists(info[5]): - return (1, '', 'User %s home directory does not exist' % self.name) - ssh_key_file = self.get_ssh_key_path() - ssh_dir = os.path.dirname(ssh_key_file) - if not os.path.exists(ssh_dir): - try: - os.mkdir(ssh_dir, 0700) - os.chown(ssh_dir, info[2], info[3]) - except OSError, e: - return (1, '', 'Failed to create %s: %s' % (ssh_dir, str(e))) - if os.path.exists(ssh_key_file): - return (None, 'Key already exists', '') - cmd = [self.module.get_bin_path('ssh-keygen', True)] - cmd.append('-t') - cmd.append(self.ssh_type) - cmd.append('-b') - cmd.append(self.ssh_bits) - cmd.append('-C') - cmd.append(self.ssh_comment) - cmd.append('-f') - cmd.append(ssh_key_file) - cmd.append('-N') - if self.ssh_passphrase is not None: - cmd.append(self.ssh_passphrase) - else: - cmd.append('') - - (rc, out, err) = self.execute_command(cmd) - if rc == 0: - # If the keys were successfully created, we should be able - # to tweak ownership. 
- os.chown(ssh_key_file, info[2], info[3]) - os.chown('%s.pub' % ssh_key_file, info[2], info[3]) - return (rc, out, err) - - def ssh_key_fingerprint(self): - ssh_key_file = self.get_ssh_key_path() - if not os.path.exists(ssh_key_file): - return (1, 'SSH Key file %s does not exist' % ssh_key_file, '') - cmd = [ self.module.get_bin_path('ssh-keygen', True) ] - cmd.append('-l') - cmd.append('-f') - cmd.append(ssh_key_file) - - return self.execute_command(cmd) - - def get_ssh_public_key(self): - ssh_public_key_file = '%s.pub' % self.get_ssh_key_path() - try: - f = open(ssh_public_key_file) - ssh_public_key = f.read().strip() - f.close() - except IOError: - return None - return ssh_public_key - - def create_user(self): - # by default we use the create_user_useradd method - return self.create_user_useradd() - - def remove_user(self): - # by default we use the remove_user_userdel method - return self.remove_user_userdel() - - def modify_user(self): - # by default we use the modify_user_usermod method - return self.modify_user_usermod() - - def create_homedir(self, path): - if not os.path.exists(path): - # use /etc/skel if possible - if os.path.exists('/etc/skel'): - try: - shutil.copytree('/etc/skel', path, symlinks=True) - except OSError, e: - self.module.exit_json(failed=True, msg="%s" % e) - else: - try: - os.makedirs(path) - except OSError, e: - self.module.exit_json(failed=True, msg="%s" % e) - - def chown_homedir(self, uid, gid, path): - try: - os.chown(path, uid, gid) - for root, dirs, files in os.walk(path): - for d in dirs: - os.chown(path, uid, gid) - for f in files: - os.chown(os.path.join(root, f), uid, gid) - except OSError, e: - self.module.exit_json(failed=True, msg="%s" % e) - - -# =========================================== - -class FreeBsdUser(User): - """ - This is a FreeBSD User manipulation class - it uses the pw command - to manipulate the user database, followed by the chpass command - to change the password. - - This overrides the following methods from the generic class:- - - create_user() - - remove_user() - - modify_user() - """ - - platform = 'FreeBSD' - distribution = None - SHADOWFILE = '/etc/master.passwd' - - def remove_user(self): - cmd = [ - self.module.get_bin_path('pw', True), - 'userdel', - '-n', - self.name - ] - if self.remove: - cmd.append('-r') - - return self.execute_command(cmd) - - def create_user(self): - cmd = [ - self.module.get_bin_path('pw', True), - 'useradd', - '-n', - self.name, - ] - - if self.uid is not None: - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.comment is not None: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None: - cmd.append('-d') - cmd.append(self.home) - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - groups = self.get_groups_set() - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.createhome: - cmd.append('-m') - - if self.shell is not None: - cmd.append('-s') - cmd.append(self.shell) - - if self.login_class is not None: - cmd.append('-L') - cmd.append(self.login_class) - - # system cannot be handled currently - should we error if its requested? 
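One detail worth flagging in chown_homedir() above: inside the os.walk() loop it passes path for every subdirectory rather than the subdirectory itself, so nested directories never actually change owner (files do, via os.path.join(root, f)). A corrected sketch of a recursive chown:

    import os

    def chown_recursive(path, uid, gid):
        os.chown(path, uid, gid)
        for root, dirs, files in os.walk(path):
            for name in dirs + files:
                # Each entry must be joined to the directory currently
                # being walked; chowning `path` only touches the top level.
                os.chown(os.path.join(root, name), uid, gid)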
- # create the user - (rc, out, err) = self.execute_command(cmd) - if rc is not None and rc != 0: - self.module.fail_json(name=self.name, msg=err, rc=rc) - - # we have to set the password in a second command - if self.password is not None: - cmd = [ - self.module.get_bin_path('chpass', True), - '-p', - self.password, - self.name - ] - return self.execute_command(cmd) - - return (rc, out, err) - - def modify_user(self): - cmd = [ - self.module.get_bin_path('pw', True), - 'usermod', - '-n', - self.name - ] - cmd_len = len(cmd) - info = self.user_info() - - if self.uid is not None and info[2] != int(self.uid): - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.comment is not None and info[4] != self.comment: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None and info[5] != self.home: - if self.move_home: - cmd.append('-m') - cmd.append('-d') - cmd.append(self.home) - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - ginfo = self.group_info(self.group) - if info[3] != ginfo[2]: - cmd.append('-g') - cmd.append(self.group) - - if self.shell is not None and info[6] != self.shell: - cmd.append('-s') - cmd.append(self.shell) - - if self.login_class is not None: - cmd.append('-L') - cmd.append(self.login_class) - - if self.groups is not None: - current_groups = self.user_group_membership() - groups = self.get_groups_set() - - group_diff = set(current_groups).symmetric_difference(groups) - groups_need_mod = False - - if group_diff: - if self.append: - for g in groups: - if g in group_diff: - groups_need_mod = True - break - else: - groups_need_mod = True - - if groups_need_mod: - cmd.append('-G') - new_groups = groups - if self.append: - new_groups = groups | set(current_groups) - cmd.append(','.join(new_groups)) - - # modify the user if cmd will do anything - if cmd_len != len(cmd): - (rc, out, err) = self.execute_command(cmd) - if rc is not None and rc != 0: - self.module.fail_json(name=self.name, msg=err, rc=rc) - else: - (rc, out, err) = (None, '', '') - - # we have to set the password in a second command - if self.update_password == 'always' and self.password is not None and info[1] != self.password: - cmd = [ - self.module.get_bin_path('chpass', True), - '-p', - self.password, - self.name - ] - return self.execute_command(cmd) - - return (rc, out, err) - -# =========================================== - -class OpenBSDUser(User): - """ - This is a OpenBSD User manipulation class. - Main differences are that OpenBSD:- - - has no concept of "system" account. 
- - has no force delete user - - This overrides the following methods from the generic class:- - - create_user() - - remove_user() - - modify_user() - """ - - platform = 'OpenBSD' - distribution = None - SHADOWFILE = '/etc/master.passwd' - - def create_user(self): - cmd = [self.module.get_bin_path('useradd', True)] - - if self.uid is not None: - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - groups = self.get_groups_set() - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.comment is not None: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None: - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None: - cmd.append('-s') - cmd.append(self.shell) - - if self.login_class is not None: - cmd.append('-L') - cmd.append(self.login_class) - - if self.password is not None: - cmd.append('-p') - cmd.append(self.password) - - if self.createhome: - cmd.append('-m') - - cmd.append(self.name) - return self.execute_command(cmd) - - def remove_user_userdel(self): - cmd = [self.module.get_bin_path('userdel', True)] - if self.remove: - cmd.append('-r') - cmd.append(self.name) - return self.execute_command(cmd) - - def modify_user(self): - cmd = [self.module.get_bin_path('usermod', True)] - info = self.user_info() - - if self.uid is not None and info[2] != int(self.uid): - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - ginfo = self.group_info(self.group) - if info[3] != ginfo[2]: - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - current_groups = self.user_group_membership() - groups_need_mod = False - groups_option = '-G' - groups = [] - - if self.groups == '': - if current_groups and not self.append: - groups_need_mod = True - else: - groups = self.get_groups_set() - group_diff = set(current_groups).symmetric_difference(groups) - - if group_diff: - if self.append: - for g in groups: - if g in group_diff: - groups_option = '-S' - groups_need_mod = True - break - else: - groups_need_mod = True - - if groups_need_mod: - cmd.append(groups_option) - cmd.append(','.join(groups)) - - if self.comment is not None and info[4] != self.comment: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None and info[5] != self.home: - if self.move_home: - cmd.append('-m') - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None and info[6] != self.shell: - cmd.append('-s') - cmd.append(self.shell) - - if self.login_class is not None: - # find current login class - user_login_class = None - userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name] - (rc, out, err) = self.execute_command(userinfo_cmd) - - for line in out.splitlines(): - tokens = line.split() - - if tokens[0] == 'class' and len(tokens) == 2: - user_login_class = tokens[1] - - # act only if login_class change - if self.login_class != user_login_class: - cmd.append('-L') - cmd.append(self.login_class) - - if self.update_password == 'always' and self.password is not None and info[1] != self.password: - cmd.append('-p') - cmd.append(self.password) - - # skip if no changes to be made - if len(cmd) == 1: - return (None, '', 
'') - elif self.module.check_mode: - return (0, '', '') - - cmd.append(self.name) - return self.execute_command(cmd) - - -# =========================================== - -class NetBSDUser(User): - """ - This is a NetBSD User manipulation class. - Main differences are that NetBSD:- - - has no concept of "system" account. - - has no force delete user - - - This overrides the following methods from the generic class:- - - create_user() - - remove_user() - - modify_user() - """ - - platform = 'NetBSD' - distribution = None - SHADOWFILE = '/etc/master.passwd' - - def create_user(self): - cmd = [self.module.get_bin_path('useradd', True)] - - if self.uid is not None: - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - groups = self.get_groups_set() - if len(groups) > 16: - self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups)) - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.comment is not None: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None: - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None: - cmd.append('-s') - cmd.append(self.shell) - - if self.login_class is not None: - cmd.append('-L') - cmd.append(self.login_class) - - if self.password is not None: - cmd.append('-p') - cmd.append(self.password) - - if self.createhome: - cmd.append('-m') - - cmd.append(self.name) - return self.execute_command(cmd) - - def remove_user_userdel(self): - cmd = [self.module.get_bin_path('userdel', True)] - if self.remove: - cmd.append('-r') - cmd.append(self.name) - return self.execute_command(cmd) - - def modify_user(self): - cmd = [self.module.get_bin_path('usermod', True)] - info = self.user_info() - - if self.uid is not None and info[2] != int(self.uid): - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - ginfo = self.group_info(self.group) - if info[3] != ginfo[2]: - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - current_groups = self.user_group_membership() - groups_need_mod = False - groups = [] - - if self.groups == '': - if current_groups and not self.append: - groups_need_mod = True - else: - groups = self.get_groups_set() - group_diff = set(current_groups).symmetric_difference(groups) - - if group_diff: - if self.append: - for g in groups: - if g in group_diff: - groups = set(current_groups).union(groups) - groups_need_mod = True - break - else: - groups_need_mod = True - - if groups_need_mod: - if len(groups) > 16: - self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." 
% len(groups)) - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.comment is not None and info[4] != self.comment: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None and info[5] != self.home: - if self.move_home: - cmd.append('-m') - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None and info[6] != self.shell: - cmd.append('-s') - cmd.append(self.shell) - - if self.login_class is not None: - cmd.append('-L') - cmd.append(self.login_class) - - if self.update_password == 'always' and self.password is not None and info[1] != self.password: - cmd.append('-p') - cmd.append(self.password) - - # skip if no changes to be made - if len(cmd) == 1: - return (None, '', '') - elif self.module.check_mode: - return (0, '', '') - - cmd.append(self.name) - return self.execute_command(cmd) - - -# =========================================== - -class SunOS(User): - """ - This is a SunOS User manipulation class - The main difference between - this class and the generic user class is that Solaris-type distros - don't support the concept of a "system" account and we need to - edit the /etc/shadow file manually to set a password. (Ugh) - - This overrides the following methods from the generic class:- - - create_user() - - remove_user() - - modify_user() - """ - - platform = 'SunOS' - distribution = None - SHADOWFILE = '/etc/shadow' - - def remove_user(self): - cmd = [self.module.get_bin_path('userdel', True)] - if self.remove: - cmd.append('-r') - cmd.append(self.name) - - return self.execute_command(cmd) - - def create_user(self): - cmd = [self.module.get_bin_path('useradd', True)] - - if self.uid is not None: - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - groups = self.get_groups_set() - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.comment is not None: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None: - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None: - cmd.append('-s') - cmd.append(self.shell) - - if self.createhome: - cmd.append('-m') - - cmd.append(self.name) - - if self.module.check_mode: - return (0, '', '') - else: - (rc, out, err) = self.execute_command(cmd) - if rc is not None and rc != 0: - self.module.fail_json(name=self.name, msg=err, rc=rc) - - # we have to set the password by editing the /etc/shadow file - if self.password is not None: - try: - lines = [] - for line in open(self.SHADOWFILE, 'rb').readlines(): - fields = line.strip().split(':') - if not fields[0] == self.name: - lines.append(line) - continue - fields[1] = self.password - fields[2] = str(int(time.time() / 86400)) - line = ':'.join(fields) - lines.append('%s\n' % line) - open(self.SHADOWFILE, 'w+').writelines(lines) - except Exception, err: - self.module.fail_json(msg="failed to update users password: %s" % str(err)) - - return (rc, out, err) - - def modify_user_usermod(self): - cmd = [self.module.get_bin_path('usermod', True)] - cmd_len = len(cmd) - info = self.user_info() - - if self.uid is not None and info[2] != int(self.uid): - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - ginfo = 
self.group_info(self.group) - if info[3] != ginfo[2]: - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - current_groups = self.user_group_membership() - groups = self.get_groups_set() - group_diff = set(current_groups).symmetric_difference(groups) - groups_need_mod = False - - if group_diff: - if self.append: - for g in groups: - if g in group_diff: - groups_need_mod = True - break - else: - groups_need_mod = True - - if groups_need_mod: - cmd.append('-G') - new_groups = groups - if self.append: - new_groups.extend(current_groups) - cmd.append(','.join(new_groups)) - - if self.comment is not None and info[4] != self.comment: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None and info[5] != self.home: - if self.move_home: - cmd.append('-m') - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None and info[6] != self.shell: - cmd.append('-s') - cmd.append(self.shell) - - if self.module.check_mode: - return (0, '', '') - else: - # modify the user if cmd will do anything - if cmd_len != len(cmd): - cmd.append(self.name) - (rc, out, err) = self.execute_command(cmd) - if rc is not None and rc != 0: - self.module.fail_json(name=self.name, msg=err, rc=rc) - else: - (rc, out, err) = (None, '', '') - - # we have to set the password by editing the /etc/shadow file - if self.update_password == 'always' and self.password is not None and info[1] != self.password: - try: - lines = [] - for line in open(self.SHADOWFILE, 'rb').readlines(): - fields = line.strip().split(':') - if not fields[0] == self.name: - lines.append(line) - continue - fields[1] = self.password - fields[2] = str(int(time.time() / 86400)) - line = ':'.join(fields) - lines.append('%s\n' % line) - open(self.SHADOWFILE, 'w+').writelines(lines) - rc = 0 - except Exception, err: - self.module.fail_json(msg="failed to update users password: %s" % str(err)) - - return (rc, out, err) - -# =========================================== - -class AIX(User): - """ - This is a AIX User manipulation class. 
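Since there is no usermod flag for it on Solaris, the SunOS class above sets passwords by editing /etc/shadow directly: field 1 of a shadow(4) entry is the crypted password and field 2 is lastchg, expressed in days since the epoch, hence int(time.time() / 86400). A standalone sketch of that edit (hypothetical function name; must run as root, and like the module it rewrites the file in place):

    import time

    def set_shadow_password(shadow_file, name, crypted):
        # shadow(4): name:passwd:lastchg:min:max:warn:inactive:expire:flag
        lines = []
        with open(shadow_file) as fh:
            for line in fh:
                fields = line.rstrip('\n').split(':')
                if fields[0] == name:
                    fields[1] = crypted
                    fields[2] = str(int(time.time() / 86400))  # days since epoch
                    line = ':'.join(fields) + '\n'
                lines.append(line)
        with open(shadow_file, 'w') as fh:
            fh.writelines(lines)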
- - This overrides the following methods from the generic class:- - - create_user() - - remove_user() - - modify_user() - """ - - platform = 'AIX' - distribution = None - SHADOWFILE = '/etc/security/passwd' - - def remove_user(self): - cmd = [self.module.get_bin_path('userdel', True)] - if self.remove: - cmd.append('-r') - cmd.append(self.name) - - return self.execute_command(cmd) - - def create_user_useradd(self, command_name='useradd'): - cmd = [self.module.get_bin_path(command_name, True)] - - if self.uid is not None: - cmd.append('-u') - cmd.append(self.uid) - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None and len(self.groups): - groups = self.get_groups_set() - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.comment is not None: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None: - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None: - cmd.append('-s') - cmd.append(self.shell) - - if self.createhome: - cmd.append('-m') - - cmd.append(self.name) - (rc, out, err) = self.execute_command(cmd) - - # set password with chpasswd - if self.password is not None: - cmd = [] - cmd.append('echo "'+self.name+':'+self.password+'" |') - cmd.append(self.module.get_bin_path('chpasswd', True)) - cmd.append('-e') - cmd.append('-c') - self.execute_command(' '.join(cmd)) - - return (rc, out, err) - - def modify_user_usermod(self): - cmd = [self.module.get_bin_path('usermod', True)] - info = self.user_info() - - if self.uid is not None and info[2] != int(self.uid): - cmd.append('-u') - cmd.append(self.uid) - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - ginfo = self.group_info(self.group) - if info[3] != ginfo[2]: - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - current_groups = self.user_group_membership() - groups_need_mod = False - groups = [] - - if self.groups == '': - if current_groups and not self.append: - groups_need_mod = True - else: - groups = self.get_groups_set() - group_diff = set(current_groups).symmetric_difference(groups) - - if group_diff: - if self.append: - for g in groups: - if g in group_diff: - groups_need_mod = True - break - else: - groups_need_mod = True - - if groups_need_mod: - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.comment is not None and info[4] != self.comment: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None and info[5] != self.home: - if self.move_home: - cmd.append('-m') - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None and info[6] != self.shell: - cmd.append('-s') - cmd.append(self.shell) - - - # skip if no changes to be made - if len(cmd) == 1: - (rc, out, err) = (None, '', '') - elif self.module.check_mode: - return (True, '', '') - else: - cmd.append(self.name) - (rc, out, err) = self.execute_command(cmd) - - # set password with chpasswd - if self.update_password == 'always' and self.password is not None and info[1] != self.password: - cmd = [] - cmd.append('echo "'+self.name+':'+self.password+'" |') - cmd.append(self.module.get_bin_path('chpasswd', True)) - cmd.append('-e') - cmd.append('-c') - (rc2, out2, err2) = self.execute_command(' '.join(cmd)) - else: - (rc2, out2, err2) = (None, '', '') - - if rc != None: - return (rc, out+out2, err+err2) - else: - return 
(rc2, out+out2, err+err2) - -# =========================================== - -def main(): - ssh_defaults = { - 'bits': '2048', - 'type': 'rsa', - 'passphrase': None, - 'comment': 'ansible-generated' - } - module = AnsibleModule( - argument_spec = dict( - state=dict(default='present', choices=['present', 'absent'], type='str'), - name=dict(required=True, aliases=['user'], type='str'), - uid=dict(default=None, type='str'), - non_unique=dict(default='no', type='bool'), - group=dict(default=None, type='str'), - groups=dict(default=None, type='str'), - comment=dict(default=None, type='str'), - home=dict(default=None, type='str'), - shell=dict(default=None, type='str'), - password=dict(default=None, type='str'), - login_class=dict(default=None, type='str'), - # following options are specific to userdel - force=dict(default='no', type='bool'), - remove=dict(default='no', type='bool'), - # following options are specific to useradd - createhome=dict(default='yes', type='bool'), - system=dict(default='no', type='bool'), - # following options are specific to usermod - move_home=dict(default='no', type='bool'), - append=dict(default='no', type='bool'), - # following are specific to ssh key generation - generate_ssh_key=dict(type='bool'), - ssh_key_bits=dict(default=ssh_defaults['bits'], type='str'), - ssh_key_type=dict(default=ssh_defaults['type'], type='str'), - ssh_key_file=dict(default=None, type='str'), - ssh_key_comment=dict(default=ssh_defaults['comment'], type='str'), - ssh_key_passphrase=dict(default=None, type='str'), - update_password=dict(default='always',choices=['always','on_create'],type='str') - ), - supports_check_mode=True - ) - - user = User(module) - - if user.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'User instantiated - platform %s' % user.platform) - if user.distribution: - syslog.syslog(syslog.LOG_NOTICE, 'User instantiated - distribution %s' % user.distribution) - - rc = None - out = '' - err = '' - result = {} - result['name'] = user.name - result['state'] = user.state - if user.state == 'absent': - if user.user_exists(): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = user.remove_user() - if rc != 0: - module.fail_json(name=user.name, msg=err, rc=rc) - result['force'] = user.force - result['remove'] = user.remove - elif user.state == 'present': - if not user.user_exists(): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = user.create_user() - result['system'] = user.system - result['createhome'] = user.createhome - else: - # modify user (note: this function is check mode aware) - (rc, out, err) = user.modify_user() - result['append'] = user.append - result['move_home'] = user.move_home - if rc is not None and rc != 0: - module.fail_json(name=user.name, msg=err, rc=rc) - if user.password is not None: - result['password'] = 'NOT_LOGGING_PASSWORD' - - if rc is None: - result['changed'] = False - else: - result['changed'] = True - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - if user.user_exists(): - info = user.user_info() - if info == False: - result['msg'] = "failed to look up user name: %s" % user.name - result['failed'] = True - result['uid'] = info[2] - result['group'] = info[3] - result['comment'] = info[4] - result['home'] = info[5] - result['shell'] = info[6] - result['uid'] = info[2] - if user.groups is not None: - result['groups'] = user.groups - - # deal with ssh key - if user.sshkeygen: - (rc, out, err) = user.ssh_key_gen() 
- if rc is not None and rc != 0: - module.fail_json(name=user.name, msg=err, rc=rc) - if rc == 0: - result['changed'] = True - (rc, out, err) = user.ssh_key_fingerprint() - if rc == 0: - result['ssh_fingerprint'] = out.strip() - else: - result['ssh_fingerprint'] = err.strip() - result['ssh_key_file'] = user.get_ssh_key_path() - result['ssh_public_key'] = user.get_ssh_public_key() - - # handle missing homedirs - info = user.user_info() - if user.home is None: - user.home = info[5] - if not os.path.exists(user.home) and user.createhome: - if not module.check_mode: - user.create_homedir(user.home) - user.chown_homedir(info[2], info[3], user.home) - result['changed'] = True - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/zfs b/library/system/zfs deleted file mode 100644 index 93248897051..00000000000 --- a/library/system/zfs +++ /dev/null @@ -1,417 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Johan Wiren -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: zfs -short_description: Manage zfs -description: - - Manages ZFS file systems on Solaris and FreeBSD. Can manage file systems, volumes and snapshots. See zfs(1M) for more information about the properties. -version_added: "1.1" -options: - name: - description: - - File system, snapshot or volume name e.g. C(rpool/myfs) - required: true - state: - description: - - Whether to create (C(present)), or remove (C(absent)) a file system, snapshot or volume. - required: true - choices: [present, absent] - aclinherit: - description: - - The aclinherit property. - required: False - choices: [discard,noallow,restricted,passthrough,passthrough-x] - aclmode: - description: - - The aclmode property. - required: False - choices: [discard,groupmask,passthrough] - atime: - description: - - The atime property. - required: False - choices: ['on','off'] - canmount: - description: - - The canmount property. - required: False - choices: ['on','off','noauto'] - casesensitivity: - description: - - The casesensitivity property. - required: False - choices: [sensitive,insensitive,mixed] - checksum: - description: - - The checksum property. - required: False - choices: ['on','off',fletcher2,fletcher4,sha256] - compression: - description: - - The compression property. - required: False - choices: ['on','off',lzjb,gzip,gzip-1,gzip-2,gzip-3,gzip-4,gzip-5,gzip-6,gzip-7,gzip-8,gzip-9,lz4,zle] - copies: - description: - - The copies property. - required: False - choices: [1,2,3] - dedup: - description: - - The dedup property. - required: False - choices: ['on','off'] - devices: - description: - - The devices property. - required: False - choices: ['on','off'] - exec: - description: - - The exec property. - required: False - choices: ['on','off'] - jailed: - description: - - The jailed property. 
- required: False - choices: ['on','off'] - logbias: - description: - - The logbias property. - required: False - choices: [latency,throughput] - mountpoint: - description: - - The mountpoint property. - required: False - nbmand: - description: - - The nbmand property. - required: False - choices: ['on','off'] - normalization: - description: - - The normalization property. - required: False - choices: [none,formC,formD,formKC,formKD] - primarycache: - description: - - The primarycache property. - required: False - choices: [all,none,metadata] - quota: - description: - - The quota property. - required: False - readonly: - description: - - The readonly property. - required: False - choices: ['on','off'] - recordsize: - description: - - The recordsize property. - required: False - refquota: - description: - - The refquota property. - required: False - refreservation: - description: - - The refreservation property. - required: False - reservation: - description: - - The reservation property. - required: False - secondarycache: - description: - - The secondarycache property. - required: False - choices: [all,none,metadata] - setuid: - description: - - The setuid property. - required: False - choices: ['on','off'] - shareiscsi: - description: - - The shareiscsi property. - required: False - choices: ['on','off'] - sharenfs: - description: - - The sharenfs property. - required: False - sharesmb: - description: - - The sharesmb property. - required: False - snapdir: - description: - - The snapdir property. - required: False - choices: [hidden,visible] - sync: - description: - - The sync property. - required: False - choices: ['on','off'] - utf8only: - description: - - The utf8only property. - required: False - choices: ['on','off'] - volsize: - description: - - The volsize property. - required: False - volblocksize: - description: - - The volblocksize property. - required: False - vscan: - description: - - The vscan property. - required: False - choices: ['on','off'] - xattr: - description: - - The xattr property. - required: False - choices: ['on','off'] - zoned: - description: - - The zoned property. - required: False - choices: ['on','off'] -author: Johan Wiren -''' - -EXAMPLES = ''' -# Create a new file system called myfs in pool rpool -- zfs: name=rpool/myfs state=present - -# Create a new volume called myvol in pool rpool. -- zfs: name=rpool/myvol state=present volsize=10M - -# Create a snapshot of rpool/myfs file system. 
-- zfs: name=rpool/myfs@mysnapshot state=present - -# Create a new file system called myfs2 with snapdir enabled -- zfs: name=rpool/myfs2 state=present snapdir=enabled -''' - - -import os - -class Zfs(object): - def __init__(self, module, name, properties): - self.module = module - self.name = name - self.properties = properties - self.changed = False - - self.immutable_properties = [ 'casesensitivity', 'normalization', 'utf8only' ] - - def exists(self): - cmd = [self.module.get_bin_path('zfs', True)] - cmd.append('list') - cmd.append('-t all') - cmd.append(self.name) - (rc, out, err) = self.module.run_command(' '.join(cmd)) - if rc == 0: - return True - else: - return False - - def create(self): - if self.module.check_mode: - self.changed = True - return - properties=self.properties - volsize = properties.pop('volsize', None) - volblocksize = properties.pop('volblocksize', None) - if "@" in self.name: - action = 'snapshot' - else: - action = 'create' - - cmd = [self.module.get_bin_path('zfs', True)] - cmd.append(action) - if volblocksize: - cmd.append('-b %s' % volblocksize) - if properties: - for prop, value in properties.iteritems(): - cmd.append('-o %s="%s"' % (prop, value)) - if volsize: - cmd.append('-V') - cmd.append(volsize) - cmd.append(self.name) - (rc, err, out) = self.module.run_command(' '.join(cmd)) - if rc == 0: - self.changed=True - else: - self.module.fail_json(msg=out) - - def destroy(self): - if self.module.check_mode: - self.changed = True - return - cmd = [self.module.get_bin_path('zfs', True)] - cmd.append('destroy') - cmd.append(self.name) - (rc, err, out) = self.module.run_command(' '.join(cmd)) - if rc == 0: - self.changed = True - else: - self.module.fail_json(msg=out) - - def set_property(self, prop, value): - if self.module.check_mode: - self.changed = True - return - cmd = self.module.get_bin_path('zfs', True) - args = [cmd, 'set', prop + '=' + value, self.name] - (rc, err, out) = self.module.run_command(args) - if rc == 0: - self.changed = True - else: - self.module.fail_json(msg=out) - - def set_properties_if_changed(self): - current_properties = self.get_current_properties() - for prop, value in self.properties.iteritems(): - if current_properties[prop] != value: - if prop in self.immutable_properties: - self.module.fail_json(msg='Cannot change property %s after creation.' % prop) - else: - self.set_property(prop, value) - - def get_current_properties(self): - def get_properties_by_name(propname): - cmd = [self.module.get_bin_path('zfs', True)] - cmd += ['get', '-H', propname, self.name] - rc, out, err = self.module.run_command(cmd) - return [l.split('\t')[1:3] for l in out.splitlines()] - properties = dict(get_properties_by_name('all')) - if 'share.*' in properties: - # Some ZFS pools list the sharenfs and sharesmb properties - # hierarchically as share.nfs and share.smb respectively. 
- del properties['share.*'] - for p, v in get_properties_by_name('share.all'): - alias = p.replace('.', '') # share.nfs -> sharenfs (etc) - properties[alias] = v - return properties - - def run_command(self, cmd): - progname = cmd[0] - cmd[0] = module.get_bin_path(progname, True) - return module.run_command(cmd) - -def main(): - - # FIXME: should use dict() constructor like other modules, required=False is default - module = AnsibleModule( - argument_spec = { - 'name': {'required': True}, - 'state': {'required': True, 'choices':['present', 'absent']}, - 'aclinherit': {'required': False, 'choices':['discard', 'noallow', 'restricted', 'passthrough', 'passthrough-x']}, - 'aclmode': {'required': False, 'choices':['discard', 'groupmask', 'passthrough']}, - 'atime': {'required': False, 'choices':['on', 'off']}, - 'canmount': {'required': False, 'choices':['on', 'off', 'noauto']}, - 'casesensitivity': {'required': False, 'choices':['sensitive', 'insensitive', 'mixed']}, - 'checksum': {'required': False, 'choices':['on', 'off', 'fletcher2', 'fletcher4', 'sha256']}, - 'compression': {'required': False, 'choices':['on', 'off', 'lzjb', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', 'gzip-9', 'lz4', 'zle']}, - 'copies': {'required': False, 'choices':['1', '2', '3']}, - 'dedup': {'required': False, 'choices':['on', 'off']}, - 'devices': {'required': False, 'choices':['on', 'off']}, - 'exec': {'required': False, 'choices':['on', 'off']}, - # Not supported - #'groupquota': {'required': False}, - 'jailed': {'required': False, 'choices':['on', 'off']}, - 'logbias': {'required': False, 'choices':['latency', 'throughput']}, - 'mountpoint': {'required': False}, - 'nbmand': {'required': False, 'choices':['on', 'off']}, - 'normalization': {'required': False, 'choices':['none', 'formC', 'formD', 'formKC', 'formKD']}, - 'primarycache': {'required': False, 'choices':['all', 'none', 'metadata']}, - 'quota': {'required': False}, - 'readonly': {'required': False, 'choices':['on', 'off']}, - 'recordsize': {'required': False}, - 'refquota': {'required': False}, - 'refreservation': {'required': False}, - 'reservation': {'required': False}, - 'secondarycache': {'required': False, 'choices':['all', 'none', 'metadata']}, - 'setuid': {'required': False, 'choices':['on', 'off']}, - 'shareiscsi': {'required': False, 'choices':['on', 'off']}, - 'sharenfs': {'required': False}, - 'sharesmb': {'required': False}, - 'snapdir': {'required': False, 'choices':['hidden', 'visible']}, - 'sync': {'required': False, 'choices':['on', 'off']}, - # Not supported - #'userquota': {'required': False}, - 'utf8only': {'required': False, 'choices':['on', 'off']}, - 'volsize': {'required': False}, - 'volblocksize': {'required': False}, - 'vscan': {'required': False, 'choices':['on', 'off']}, - 'xattr': {'required': False, 'choices':['on', 'off']}, - 'zoned': {'required': False, 'choices':['on', 'off']}, - }, - supports_check_mode=True - ) - - state = module.params.pop('state') - name = module.params.pop('name') - - # Get all valid zfs-properties - properties = dict() - for prop, value in module.params.iteritems(): - if prop in ['CHECKMODE']: - continue - if value: - properties[prop] = value - - result = {} - result['name'] = name - result['state'] = state - - zfs=Zfs(module, name, properties) - - if state == 'present': - if zfs.exists(): - zfs.set_properties_if_changed() - else: - zfs.create() - - elif state == 'absent': - if zfs.exists(): - zfs.destroy() - - result.update(zfs.properties) - 
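The set_properties_if_changed() logic above is the module's idempotence core: read the current values, compare, and only issue zfs set for properties that drift. A freestanding Python 3 sketch of that loop, with plain subprocess standing in for module.run_command and an illustrative helper name:

    import subprocess

    def sync_zfs_properties(name, wanted):
        # With -H, zfs get prints one tab-separated line per property:
        # name <TAB> property <TAB> value <TAB> source
        out = subprocess.check_output(
            ['zfs', 'get', '-H', ','.join(wanted), name], text=True)
        current = {fields[1]: fields[2] for fields in
                   (line.split('\t') for line in out.splitlines())}
        changed = False
        for prop, value in wanted.items():
            if current.get(prop) != value:  # only touch drifted properties
                subprocess.check_call(
                    ['zfs', 'set', '%s=%s' % (prop, value), name])
                changed = True
        return changed

Unlike the module, this sketch does not guard the immutable creation-time properties (casesensitivity, normalization, utf8only), which the module refuses to change after creation.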
result['changed'] = zfs.changed - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/utilities/accelerate b/library/utilities/accelerate deleted file mode 100644 index bd62471316c..00000000000 --- a/library/utilities/accelerate +++ /dev/null @@ -1,727 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, James Cammarata -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: accelerate -short_description: Enable accelerated mode on remote node -description: - - This modules launches an ephemeral I(accelerate) daemon on the remote node which - Ansible can use to communicate with nodes at high speed. - - The daemon listens on a configurable port for a configurable amount of time. - - Fireball mode is AES encrypted -version_added: "1.3" -options: - port: - description: - - TCP port for the socket connection - required: false - default: 5099 - aliases: [] - timeout: - description: - - The number of seconds the socket will wait for data. If none is received when the timeout value is reached, the connection will be closed. - required: false - default: 300 - aliases: [] - minutes: - description: - - The I(accelerate) listener daemon is started on nodes and will stay around for - this number of minutes before turning itself off. - required: false - default: 30 - ipv6: - description: - - The listener daemon on the remote host will bind to the ipv6 localhost socket - if this parameter is set to true. - required: false - default: false - multi_key: - description: - - When enabled, the daemon will open a local socket file which can be used by future daemon executions to - upload a new key to the already running daemon, so that multiple users can connect using different keys. - This access still requires an ssh connection as the uid for which the daemon is currently running. - required: false - default: no - version_added: "1.6" -notes: - - See the advanced playbooks chapter for more about using accelerated mode. -requirements: [ "python-keyczar" ] -author: James Cammarata -''' - -EXAMPLES = ''' -# To use accelerate mode, simply add "accelerate: true" to your play. 
The initial -# key exchange and starting up of the daemon will occur over SSH, but all commands and -# subsequent actions will be conducted over the raw socket connection using AES encryption - -- hosts: devservers - accelerate: true - tasks: - - command: /usr/bin/anything -''' - -import base64 -import errno -import getpass -import json -import os -import os.path -import pwd -import signal -import socket -import struct -import sys -import syslog -import tempfile -import time -import traceback - -import SocketServer - -from datetime import datetime -from threading import Thread, Lock - -# import module snippets -# we must import this here at the top so we can use get_module_path() -from ansible.module_utils.basic import * - -syslog.openlog('ansible-%s' % os.path.basename(__file__)) - -# the chunk size to read and send, assuming mtu 1500 and -# leaving room for base64 (+33%) encoding and header (100 bytes) -# 4 * (975/3) + 100 = 1400 -# which leaves room for the TCP/IP header -CHUNK_SIZE=10240 - -# FIXME: this all should be moved to module_common, as it's -# pretty much a copy from the callbacks/util code -DEBUG_LEVEL=0 -def log(msg, cap=0): - global DEBUG_LEVEL - if DEBUG_LEVEL >= cap: - syslog.syslog(syslog.LOG_NOTICE|syslog.LOG_DAEMON, msg) - -def v(msg): - log(msg, cap=1) - -def vv(msg): - log(msg, cap=2) - -def vvv(msg): - log(msg, cap=3) - -def vvvv(msg): - log(msg, cap=4) - - -HAS_KEYCZAR = False -try: - from keyczar.keys import AesKey - HAS_KEYCZAR = True -except ImportError: - pass - -SOCKET_FILE = os.path.join(get_module_path(), '.ansible-accelerate', ".local.socket") - -def get_pid_location(module): - """ - Try to find a pid directory in the common locations, falling - back to the user's home directory if no others exist - """ - for dir in ['/var/run', '/var/lib/run', '/run', os.path.expanduser("~/")]: - try: - if os.path.isdir(dir) and os.access(dir, os.R_OK|os.W_OK): - return os.path.join(dir, '.accelerate.pid') - except: - pass - module.fail_json(msg="couldn't find any valid directory to use for the accelerate pid file") - - -# NOTE: this shares a fair amount of code in common with async_wrapper, if async_wrapper were a new module we could move -# this into utils.module_common and probably should anyway - -def daemonize_self(module, password, port, minutes, pid_file): - # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012 - try: - pid = os.fork() - if pid > 0: - vvv("exiting pid %s" % pid) - # exit first parent - module.exit_json(msg="daemonized accelerate on port %s for %s minutes with pid %s" % (port, minutes, str(pid))) - except OSError, e: - log("fork #1 failed: %d (%s)" % (e.errno, e.strerror)) - sys.exit(1) - - # decouple from parent environment - os.chdir("/") - os.setsid() - os.umask(022) - - # do second fork - try: - pid = os.fork() - if pid > 0: - log("daemon pid %s, writing %s" % (pid, pid_file)) - pid_file = open(pid_file, "w") - pid_file.write("%s" % pid) - pid_file.close() - vvv("pid file written") - sys.exit(0) - except OSError, e: - log("fork #2 failed: %d (%s)" % (e.errno, e.strerror)) - sys.exit(1) - - dev_null = file('/dev/null','rw') - os.dup2(dev_null.fileno(), sys.stdin.fileno()) - os.dup2(dev_null.fileno(), sys.stdout.fileno()) - os.dup2(dev_null.fileno(), sys.stderr.fileno()) - log("daemonizing successful") - -class LocalSocketThread(Thread): - server = None - terminated = False - - def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None): - self.server = kwargs.get('server') - 
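The TCP request handler further down frames every encrypted payload with an 8-byte big-endian length header (struct format '!Q'), which is what lets recv_data() know exactly how many bytes to read off the wire. A self-contained Python 3 sketch of that framing over a plain socket, without the keyczar encryption layer (function names are illustrative):

    import struct

    HEADER_LEN = 8  # a packed unsigned long long ('!Q')

    def send_msg(sock, payload):
        # Length-prefix the payload so the peer knows where the message ends.
        sock.sendall(struct.pack('!Q', len(payload)) + payload)

    def recv_exactly(sock, n):
        data = b''
        while len(data) < n:
            chunk = sock.recv(n - len(data))
            if not chunk:  # peer closed mid-message
                raise ConnectionError('socket closed with %d bytes pending'
                                      % (n - len(data)))
            data += chunk
        return data

    def recv_msg(sock):
        (length,) = struct.unpack('!Q', recv_exactly(sock, HEADER_LEN))
        return recv_exactly(sock, length)

The same pattern, minus the helpers, is what send_data()/recv_data() below implement around each AES-encrypted JSON blob. The daemon classes continue below.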
Thread.__init__(self, group, target, name, args, kwargs, Verbose) - - def run(self): - try: - if os.path.exists(SOCKET_FILE): - os.remove(SOCKET_FILE) - else: - dir = os.path.dirname(SOCKET_FILE) - if os.path.exists(dir): - if not os.path.isdir(dir): - log("The socket file path (%s) exists, but is not a directory. No local connections will be available" % dir) - return - else: - # make sure the directory is accessible only to this - # user, as socket files derive their permissions from - # the directory that contains them - os.chmod(dir, 0700) - elif not os.path.exists(dir): - os.makedirs(dir, 0700) - except OSError: - pass - self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - self.s.bind(SOCKET_FILE) - self.s.listen(5) - while not self.terminated: - try: - conn, addr = self.s.accept() - vv("received local connection") - data = "" - while "\n" not in data: - data += conn.recv(2048) - try: - new_key = AesKey.Read(data.strip()) - found = False - for key in self.server.key_list: - try: - new_key.Decrypt(key.Encrypt("foo")) - found = True - break - except: - pass - if not found: - vv("adding new key to the key list") - self.server.key_list.append(new_key) - conn.sendall("OK\n") - else: - vv("key already exists in the key list, ignoring") - conn.sendall("EXISTS\n") - - # update the last event time so the server doesn't - # shutdown sooner than expected for new cliets - try: - self.server.last_event_lock.acquire() - self.server.last_event = datetime.now() - finally: - self.server.last_event_lock.release() - except Exception, e: - vv("key loaded locally was invalid, ignoring (%s)" % e) - conn.sendall("BADKEY\n") - finally: - try: - conn.close() - except: - pass - except: - pass - - def terminate(self): - self.terminated = True - self.s.shutdown(socket.SHUT_RDWR) - self.s.close() - -class ThreadWithReturnValue(Thread): - def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None): - Thread.__init__(self, group, target, name, args, kwargs, Verbose) - self._return = None - - def run(self): - if self._Thread__target is not None: - self._return = self._Thread__target(*self._Thread__args, - **self._Thread__kwargs) - - def join(self,timeout=None): - Thread.join(self, timeout=timeout) - return self._return - -class ThreadedTCPServer(SocketServer.ThreadingTCPServer): - key_list = [] - last_event = datetime.now() - last_event_lock = Lock() - def __init__(self, server_address, RequestHandlerClass, module, password, timeout, use_ipv6=False): - self.module = module - self.key_list.append(AesKey.Read(password)) - self.allow_reuse_address = True - self.timeout = timeout - - if use_ipv6: - self.address_family = socket.AF_INET6 - - if self.module.params.get('multi_key', False): - vv("starting thread to handle local connections for multiple keys") - self.local_thread = LocalSocketThread(kwargs=dict(server=self)) - self.local_thread.start() - - SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass) - - def shutdown(self): - self.local_thread.terminate() - self.running = False - SocketServer.ThreadingTCPServer.shutdown(self) - -class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): - # the key to use for this connection - active_key = None - - def send_data(self, data): - try: - self.server.last_event_lock.acquire() - self.server.last_event = datetime.now() - finally: - self.server.last_event_lock.release() - - packed_len = struct.pack('!Q', len(data)) - return self.request.sendall(packed_len + data) - - def recv_data(self): - header_len = 
8 # size of a packed unsigned long long - data = "" - vvvv("in recv_data(), waiting for the header") - while len(data) < header_len: - try: - d = self.request.recv(header_len - len(data)) - if not d: - vvv("received nothing, bailing out") - return None - data += d - except: - # probably got a connection reset - vvvv("exception received while waiting for recv(), returning None") - return None - vvvv("in recv_data(), got the header, unpacking") - data_len = struct.unpack('!Q',data[:header_len])[0] - data = data[header_len:] - vvvv("data received so far (expecting %d): %d" % (data_len,len(data))) - while len(data) < data_len: - try: - d = self.request.recv(data_len - len(data)) - if not d: - vvv("received nothing, bailing out") - return None - data += d - vvvv("data received so far (expecting %d): %d" % (data_len,len(data))) - except: - # probably got a connection reset - vvvv("exception received while waiting for recv(), returning None") - return None - vvvv("received all of the data, returning") - - try: - self.server.last_event_lock.acquire() - self.server.last_event = datetime.now() - finally: - self.server.last_event_lock.release() - - return data - - def handle(self): - try: - while True: - vvvv("waiting for data") - data = self.recv_data() - if not data: - vvvv("received nothing back from recv_data(), breaking out") - break - vvvv("got data, decrypting") - if not self.active_key: - for key in self.server.key_list: - try: - data = key.Decrypt(data) - self.active_key = key - break - except: - pass - else: - vv("bad decrypt, exiting the connection handler") - return - else: - try: - data = self.active_key.Decrypt(data) - except: - vv("bad decrypt, exiting the connection handler") - return - - vvvv("decryption done, loading json from the data") - data = json.loads(data) - - mode = data['mode'] - response = {} - last_pong = datetime.now() - if mode == 'command': - vvvv("received a command request, running it") - twrv = ThreadWithReturnValue(target=self.command, args=(data,)) - twrv.start() - response = None - while twrv.is_alive(): - if (datetime.now() - last_pong).seconds >= 15: - last_pong = datetime.now() - vvvv("command still running, sending keepalive packet") - data2 = json.dumps(dict(pong=True)) - data2 = self.active_key.Encrypt(data2) - self.send_data(data2) - time.sleep(0.1) - response = twrv._return - vvvv("thread is done, response from join was %s" % response) - elif mode == 'put': - vvvv("received a put request, putting it") - response = self.put(data) - elif mode == 'fetch': - vvvv("received a fetch request, getting it") - response = self.fetch(data) - elif mode == 'validate_user': - vvvv("received a request to validate the user id") - response = self.validate_user(data) - - vvvv("response result is %s" % str(response)) - json_response = json.dumps(response) - vvvv("dumped json is %s" % json_response) - data2 = self.active_key.Encrypt(json_response) - vvvv("sending the response back to the controller") - self.send_data(data2) - vvvv("done sending the response") - - if mode == 'validate_user' and response.get('rc') == 1: - vvvv("detected a uid mismatch, shutting down") - self.server.shutdown() - except: - tb = traceback.format_exc() - log("encountered an unhandled exception in the handle() function") - log("error was:\n%s" % tb) - if self.active_key: - data2 = json.dumps(dict(rc=1, failed=True, msg="unhandled error in the handle() function")) - data2 = self.active_key.Encrypt(data2) - self.send_data(data2) - - def validate_user(self, data): - if 'username' not in data: - return 
dict(failed=True, msg='No username specified') - - vvvv("validating we're running as %s" % data['username']) - - # get the current uid - c_uid = os.getuid() - try: - # the target uid - t_uid = pwd.getpwnam(data['username']).pw_uid - except: - vvvv("could not find user %s" % data['username']) - return dict(failed=True, msg='could not find user %s' % data['username']) - - # and return rc=0 for success, rc=1 for failure - if c_uid == t_uid: - return dict(rc=0) - else: - return dict(rc=1) - - def command(self, data): - if 'cmd' not in data: - return dict(failed=True, msg='internal error: cmd is required') - if 'tmp_path' not in data: - return dict(failed=True, msg='internal error: tmp_path is required') - - vvvv("executing: %s" % data['cmd']) - - use_unsafe_shell = False - executable = data.get('executable') - if executable: - use_unsafe_shell = True - - rc, stdout, stderr = self.server.module.run_command(data['cmd'], executable=executable, use_unsafe_shell=use_unsafe_shell, close_fds=True) - if stdout is None: - stdout = '' - if stderr is None: - stderr = '' - vvvv("got stdout: %s" % stdout) - vvvv("got stderr: %s" % stderr) - - return dict(rc=rc, stdout=stdout, stderr=stderr) - - def fetch(self, data): - if 'in_path' not in data: - return dict(failed=True, msg='internal error: in_path is required') - - try: - fd = file(data['in_path'], 'rb') - fstat = os.stat(data['in_path']) - vvv("FETCH file is %d bytes" % fstat.st_size) - while fd.tell() < fstat.st_size: - data = fd.read(CHUNK_SIZE) - last = False - if fd.tell() >= fstat.st_size: - last = True - data = dict(data=base64.b64encode(data), last=last) - data = json.dumps(data) - data = self.active_key.Encrypt(data) - - if self.send_data(data): - return dict(failed=True, stderr="failed to send data") - - response = self.recv_data() - if not response: - log("failed to get a response, aborting") - return dict(failed=True, stderr="Failed to get a response from %s" % self.host) - response = self.active_key.Decrypt(response) - response = json.loads(response) - - if response.get('failed',False): - log("got a failed response from the master") - return dict(failed=True, stderr="Master reported failure, aborting transfer") - except Exception, e: - fd.close() - tb = traceback.format_exc() - log("failed to fetch the file: %s" % tb) - return dict(failed=True, stderr="Could not fetch the file: %s" % str(e)) - - fd.close() - return dict() - - def put(self, data): - if 'data' not in data: - return dict(failed=True, msg='internal error: data is required') - if 'out_path' not in data: - return dict(failed=True, msg='internal error: out_path is required') - - final_path = None - if 'user' in data and data.get('user') != getpass.getuser(): - vvv("the target user doesn't match this user, we'll move the file into place via sudo") - tmp_path = os.path.expanduser('~/.ansible/tmp/') - if not os.path.exists(tmp_path): - try: - os.makedirs(tmp_path, 0700) - except: - return dict(failed=True, msg='could not create a temporary directory at %s' % tmp_path) - (fd,out_path) = tempfile.mkstemp(prefix='ansible.', dir=tmp_path) - out_fd = os.fdopen(fd, 'w', 0) - final_path = data['out_path'] - else: - out_path = data['out_path'] - out_fd = open(out_path, 'w') - - try: - bytes=0 - while True: - out = base64.b64decode(data['data']) - bytes += len(out) - out_fd.write(out) - response = json.dumps(dict()) - response = self.active_key.Encrypt(response) - self.send_data(response) - if data['last']: - break - data = self.recv_data() - if not data: - raise "" - data = 
self.active_key.Decrypt(data) - data = json.loads(data) - except: - out_fd.close() - tb = traceback.format_exc() - log("failed to put the file: %s" % tb) - return dict(failed=True, stdout="Could not write the file") - - vvvv("wrote %d bytes" % bytes) - out_fd.close() - - if final_path: - vvv("moving %s to %s" % (out_path, final_path)) - self.server.module.atomic_move(out_path, final_path) - return dict() - -def daemonize(module, password, port, timeout, minutes, use_ipv6, pid_file): - try: - daemonize_self(module, password, port, minutes, pid_file) - - def timer_handler(signum, _): - try: - server.last_event_lock.acquire() - td = datetime.now() - server.last_event - # older python timedelta objects don't have total_seconds(), - # so we use the formula from the docs to calculate it - total_seconds = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6 - if total_seconds >= minutes * 60: - log("server has been idle longer than the timeout, shutting down") - server.running = False - server.shutdown() - else: - # reschedule the check - vvvv("daemon idle for %d seconds (timeout=%d)" % (total_seconds,minutes*60)) - signal.alarm(30) - except: - pass - finally: - server.last_event_lock.release() - - signal.signal(signal.SIGALRM, timer_handler) - signal.alarm(30) - - tries = 5 - while tries > 0: - try: - if use_ipv6: - address = ("::", port) - else: - address = ("0.0.0.0", port) - server = ThreadedTCPServer(address, ThreadedTCPRequestHandler, module, password, timeout, use_ipv6=use_ipv6) - server.allow_reuse_address = True - break - except Exception, e: - vv("Failed to create the TCP server (tries left = %d) (error: %s) " % (tries,e)) - tries -= 1 - time.sleep(0.2) - - if tries == 0: - vv("Maximum number of attempts to create the TCP server reached, bailing out") - raise Exception("max # of attempts to serve reached") - - # run the server in a separate thread to make signal handling work - server_thread = Thread(target=server.serve_forever, kwargs=dict(poll_interval=0.1)) - server_thread.start() - server.running = True - - v("serving!") - while server.running: - time.sleep(1) - - # wait for the thread to exit fully - server_thread.join() - - v("server thread terminated, exiting!") - sys.exit(0) - except Exception, e: - tb = traceback.format_exc() - log("exception caught, exiting accelerated mode: %s\n%s" % (e, tb)) - sys.exit(0) - -def main(): - global DEBUG_LEVEL - module = AnsibleModule( - argument_spec = dict( - port=dict(required=False, default=5099), - ipv6=dict(required=False, default=False, type='bool'), - multi_key=dict(required=False, default=False, type='bool'), - timeout=dict(required=False, default=300), - password=dict(required=True), - minutes=dict(required=False, default=30), - debug=dict(required=False, default=0, type='int') - ), - supports_check_mode=True - ) - - password = base64.b64decode(module.params['password']) - port = int(module.params['port']) - timeout = int(module.params['timeout']) - minutes = int(module.params['minutes']) - debug = int(module.params['debug']) - ipv6 = module.params['ipv6'] - multi_key = module.params['multi_key'] - - if not HAS_KEYCZAR: - module.fail_json(msg="keyczar is not installed (on the remote side)") - - DEBUG_LEVEL=debug - pid_file = get_pid_location(module) - - daemon_pid = None - daemon_running = False - if os.path.exists(pid_file): - try: - daemon_pid = int(open(pid_file).read()) - try: - # sending signal 0 doesn't do anything to the - # process, other than tell the calling program - # whether other signals can be sent - 
os.kill(daemon_pid, 0) - except OSError, e: - if e.errno == errno.EPERM: - # no permissions means the pid is probably - # running, but as a different user, so fail - module.fail_json(msg="the accelerate daemon appears to be running as a different user that this user cannot access (pid=%d)" % daemon_pid) - else: - daemon_running = True - except ValueError: - # invalid pid file, unlink it - otherwise we don't care - try: - os.unlink(pid_file) - except: - pass - - if daemon_running and multi_key: - # try to connect to the file socket for the daemon if it exists - s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - try: - s.connect(SOCKET_FILE) - s.sendall(password + '\n') - data = "" - while '\n' not in data: - data += s.recv(2048) - res = data.strip() - except: - module.fail_json(msg="failed to connect to the local socket file") - finally: - try: - s.close() - except: - pass - - if res in ("OK", "EXISTS"): - module.exit_json(msg="transferred new key to the existing daemon") - else: - module.fail_json(msg="could not transfer new key: %s" % data.strip()) - else: - # try to start up the daemon - daemonize(module, password, port, timeout, minutes, ipv6, pid_file) - -main() diff --git a/library/utilities/assert b/library/utilities/assert deleted file mode 100644 index f5963d60cd7..00000000000 --- a/library/utilities/assert +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2012 Dag Wieers -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: assert -short_description: Fail with custom message -description: - - This module asserts that a given expression is true and can be a simpler alternative to the 'fail' module in some cases. -version_added: "1.5" -options: - that: - description: - - "A string expression of the same form that can be passed to the 'when' statement" - - "Alternatively, a list of string expressions" - required: true -author: Michael DeHaan -''' - -EXAMPLES = ''' -- assert: { that: "ansible_os_family != 'RedHat'" } - -- assert: - that: - - "'foo' in some_command_result.stdout" - - "number_of_the_counting == 3" -''' diff --git a/library/utilities/debug b/library/utilities/debug deleted file mode 100644 index 2df68ca0830..00000000000 --- a/library/utilities/debug +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2012 Dag Wieers -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
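One detail in accelerate's main() above is easy to miss: probing the pid with signal 0 delivers nothing and only asks the kernel whether the process could be signalled, so EPERM means "alive, but owned by another user" while ESRCH means the pid file is stale. A compact Python 3 restatement of that probe (illustrative helper name):

    import errno
    import os

    def pid_state(pid):
        # Signal 0 does existence/permission checks without sending a signal.
        try:
            os.kill(pid, 0)
        except OSError as e:
            if e.errno == errno.EPERM:
                return 'running-as-other-user'  # exists; we may not signal it
            return 'not-running'                # ESRCH: stale pid file
        return 'running'

The debug module's header continues below.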
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: debug -short_description: Print statements during execution -description: - - This module prints statements during execution and can be useful - for debugging variables or expressions without necessarily halting - the playbook. Useful for debugging together with the 'when:' directive. - -version_added: "0.8" -options: - msg: - description: - - The customized message that is printed. If omitted, prints a generic - message. - required: false - default: "Hello world!" - var: - description: - - A variable name to debug. Mutually exclusive with the 'msg' option. -author: Dag Wieers, Michael DeHaan -''' - -EXAMPLES = ''' -# Example that prints the loopback address and gateway for each host -- debug: msg="System {{ inventory_hostname }} has uuid {{ ansible_product_uuid }}" - -- debug: msg="System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}" - when: ansible_default_ipv4.gateway is defined - -- shell: /usr/bin/uptime - register: result - -- debug: var=result - -- name: Display all variables/facts known for a host - debug: var=hostvars[inventory_hostname] -''' diff --git a/library/utilities/fail b/library/utilities/fail deleted file mode 100644 index 23f5b83668c..00000000000 --- a/library/utilities/fail +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2012 Dag Wieers -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: fail -short_description: Fail with custom message -description: - - This module fails the progress with a custom message. It can be - useful for bailing out when a certain condition is met using C(when). -version_added: "0.8" -options: - msg: - description: - - The customized message used for failing execution. If omitted, - fail will simple bail out with a generic message. - required: false - default: "'Failed as requested from task'" - -author: Dag Wieers -''' - -EXAMPLES = ''' -# Example playbook using fail and when together -- fail: msg="The system may not be provisioned according to the CMDB status." - when: cmdb_status != "to-be-staged" -''' diff --git a/library/utilities/fireball b/library/utilities/fireball deleted file mode 100644 index 43760969a89..00000000000 --- a/library/utilities/fireball +++ /dev/null @@ -1,280 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: fireball -short_description: Enable fireball mode on remote node -description: - - This modules launches an ephemeral I(fireball) ZeroMQ message bus daemon on the remote node which - Ansible can use to communicate with nodes at high speed. - - The daemon listens on a configurable port for a configurable amount of time. - - Starting a new fireball as a given user terminates any existing user fireballs. - - Fireball mode is AES encrypted -version_added: "0.9" -options: - port: - description: - - TCP port for ZeroMQ - required: false - default: 5099 - aliases: [] - minutes: - description: - - The I(fireball) listener daemon is started on nodes and will stay around for - this number of minutes before turning itself off. - required: false - default: 30 -notes: - - See the advanced playbooks chapter for more about using fireball mode. -requirements: [ "zmq", "keyczar" ] -author: Michael DeHaan -''' - -EXAMPLES = ''' -# This example playbook has two plays: the first launches 'fireball' mode on all hosts via SSH, and -# the second actually starts using it for subsequent management over the fireball connection - -- hosts: devservers - gather_facts: false - connection: ssh - sudo: yes - tasks: - - action: fireball - -- hosts: devservers - connection: fireball - tasks: - - command: /usr/bin/anything -''' - -import os -import sys -import shutil -import time -import base64 -import syslog -import signal -import time -import signal -import traceback - -syslog.openlog('ansible-%s' % os.path.basename(__file__)) -PIDFILE = os.path.expanduser("~/.fireball.pid") - -def log(msg): - syslog.syslog(syslog.LOG_NOTICE, msg) - -if os.path.exists(PIDFILE): - try: - data = int(open(PIDFILE).read()) - try: - os.kill(data, signal.SIGKILL) - except OSError: - pass - except ValueError: - pass - os.unlink(PIDFILE) - -HAS_ZMQ = False -try: - import zmq - HAS_ZMQ = True -except ImportError: - pass - -HAS_KEYCZAR = False -try: - from keyczar.keys import AesKey - HAS_KEYCZAR = True -except ImportError: - pass - -# NOTE: this shares a fair amount of code in common with async_wrapper, if async_wrapper were a new module we could move -# this into utils.module_common and probably should anyway - -def daemonize_self(module, password, port, minutes): - # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012 - try: - pid = os.fork() - if pid > 0: - log("exiting pid %s" % pid) - # exit first parent - module.exit_json(msg="daemonized fireball on port %s for %s minutes" % (port, minutes)) - except OSError, e: - log("fork #1 failed: %d (%s)" % (e.errno, e.strerror)) - sys.exit(1) - - # decouple from parent environment - os.chdir("/") - os.setsid() - os.umask(022) - - # do second fork - try: - pid = os.fork() - if pid > 0: - log("daemon pid %s, writing %s" % (pid, PIDFILE)) - pid_file = open(PIDFILE, "w") - pid_file.write("%s" % pid) - pid_file.close() - log("pidfile written") - sys.exit(0) - except OSError, e: - log("fork #2 failed: %d (%s)" % (e.errno, e.strerror)) - sys.exit(1) - - dev_null = file('/dev/null','rw') - os.dup2(dev_null.fileno(), sys.stdin.fileno()) - os.dup2(dev_null.fileno(), sys.stdout.fileno()) - 
os.dup2(dev_null.fileno(), sys.stderr.fileno()) - log("daemonizing successful (%s,%s)" % (password, port)) - -def command(module, data): - if 'cmd' not in data: - return dict(failed=True, msg='internal error: cmd is required') - if 'tmp_path' not in data: - return dict(failed=True, msg='internal error: tmp_path is required') - if 'executable' not in data: - return dict(failed=True, msg='internal error: executable is required') - - log("executing: %s" % data['cmd']) - rc, stdout, stderr = module.run_command(data['cmd'], executable=data['executable'], close_fds=True) - if stdout is None: - stdout = '' - if stderr is None: - stderr = '' - log("got stdout: %s" % stdout) - - return dict(rc=rc, stdout=stdout, stderr=stderr) - -def fetch(data): - if 'in_path' not in data: - return dict(failed=True, msg='internal error: in_path is required') - - # FIXME: should probably support chunked file transfer for binary files - # at some point. For now, just base64 encodes the file - # so don't use it to move ISOs, use rsync. - - fh = open(data['in_path']) - data = base64.b64encode(fh.read()) - return dict(data=data) - -def put(data): - - if 'data' not in data: - return dict(failed=True, msg='internal error: data is required') - if 'out_path' not in data: - return dict(failed=True, msg='internal error: out_path is required') - - # FIXME: should probably support chunked file transfer for binary files - # at some point. For now, just base64 encodes the file - # so don't use it to move ISOs, use rsync. - - fh = open(data['out_path'], 'w') - fh.write(base64.b64decode(data['data'])) - fh.close() - - return dict() - -def serve(module, password, port, minutes): - - - log("serving") - context = zmq.Context() - socket = context.socket(zmq.REP) - addr = "tcp://*:%s" % port - log("zmq serving on %s" % addr) - socket.bind(addr) - - # password isn't so much a password but a serialized AesKey object that we xferred over SSH - # password as a variable in ansible is never logged though, so it serves well - - key = AesKey.Read(password) - - while True: - - data = socket.recv() - - try: - data = key.Decrypt(data) - except: - continue - - data = json.loads(data) - - mode = data['mode'] - response = {} - - if mode == 'command': - response = command(module, data) - elif mode == 'put': - response = put(data) - elif mode == 'fetch': - response = fetch(data) - - data2 = json.dumps(response) - data2 = key.Encrypt(data2) - socket.send(data2) - -def daemonize(module, password, port, minutes): - - try: - daemonize_self(module, password, port, minutes) - - def catcher(signum, _): - module.exit_json(msg='timer expired') - - signal.signal(signal.SIGALRM, catcher) - signal.setitimer(signal.ITIMER_REAL, 60 * minutes) - - - serve(module, password, port, minutes) - except Exception, e: - tb = traceback.format_exc() - log("exception caught, exiting fireball mode: %s\n%s" % (e, tb)) - sys.exit(0) - -def main(): - - module = AnsibleModule( - argument_spec = dict( - port=dict(required=False, default=5099), - password=dict(required=True), - minutes=dict(required=False, default=30), - ), - supports_check_mode=True - ) - - password = base64.b64decode(module.params['password']) - port = module.params['port'] - minutes = int(module.params['minutes']) - - if not HAS_ZMQ: - module.fail_json(msg="zmq is not installed") - if not HAS_KEYCZAR: - module.fail_json(msg="keyczar is not installed") - - daemonize(module, password, port, minutes) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git 
a/library/utilities/include_vars b/library/utilities/include_vars deleted file mode 100644 index 4c7c39d9035..00000000000 --- a/library/utilities/include_vars +++ /dev/null @@ -1,39 +0,0 @@ -# -*- mode: python -*- - -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -author: Benno Joy -module: include_vars -short_description: Load variables from files, dynamically within a task. -description: - - Loads variables from a YAML file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from. -options: - free-form: - description: - - The file name from which variables should be loaded, if called from a role it will look for - the file in vars/ subdirectory of the role, otherwise the path would be relative to playbook. An absolute path can also be provided. - required: true -version_added: "1.4" -''' - -EXAMPLES = """ -# Conditionally decide to load in variables when x is 0, otherwise do not. -- include_vars: contingency_plan.yml - when: x == 0 - -# Load a variable file based on the OS type, or a default if not found. -- include_vars: "{{ item }}" - with_first_found: - - "{{ ansible_distribution }}.yml" - - "{{ ansible_os_family }}.yml" - - "default.yml" - -""" diff --git a/library/utilities/pause b/library/utilities/pause deleted file mode 100644 index 6e8a83afe61..00000000000 --- a/library/utilities/pause +++ /dev/null @@ -1,40 +0,0 @@ -# -*- mode: python -*- - -DOCUMENTATION = ''' ---- -module: pause -short_description: Pause playbook execution -description: - - Pauses playbook execution for a set amount of time, or until a prompt is acknowledged. All parameters are optional. The default behavior is to pause with a prompt. - - "You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely. To continue early: press C(ctrl+c) and then C(c). To abort a playbook: press C(ctrl+c) and then C(a)." - - "The pause module integrates into async/parallelized playbooks without any special considerations (see also: Rolling Updates). When using pauses with the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts." -version_added: "0.8" -options: - minutes: - description: - - Number of minutes to pause for. - required: false - default: null - seconds: - description: - - Number of seconds to pause for. - required: false - default: null - prompt: - description: - - Optional text to use for the prompt message. - required: false - default: null -author: Tim Bielawa -''' - -EXAMPLES = ''' -# Pause for 5 minutes to build app cache. -- pause: minutes=5 - -# Pause until you can verify updates to an application were successful. -- pause: - -# A helpful reminder of what to look out for post-update. 
-- pause: prompt="Make sure org.foo.FooOverload exception is not present" -''' diff --git a/library/utilities/set_fact b/library/utilities/set_fact deleted file mode 100644 index ea67cc43a3f..00000000000 --- a/library/utilities/set_fact +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Dag Wieers -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -author: Dag Wieers -module: set_fact -short_description: Set host facts from a task -description: - - This module allows setting new variables. Variables are set on a host-by-host basis - just like facts discovered by the setup module. - - These variables will survive between plays. -options: - key_value: - description: - - The C(set_fact) module takes key=value pairs as variables to set - in the playbook scope. Or alternatively, accepts complex arguments - using the C(args:) statement. - required: true - default: null -version_added: "1.2" -''' - -EXAMPLES = ''' -# Example setting host facts using key=value pairs -- set_fact: one_fact="something" other_fact="{{ local_var * 2 }}" - -# Example setting host facts using complex arguments -- set_fact: - one_fact: something - other_fact: "{{ local_var * 2 }}" - -# As of 1.8, Ansible will convert boolean strings ('true', 'false', 'yes', 'no') -# to proper boolean values when using the key=value syntax, however it is still -# recommended that booleans be set using the complex argument style: -- set_fact: - one_fact: true - other_fact: false - -''' diff --git a/library/utilities/wait_for b/library/utilities/wait_for deleted file mode 100644 index 5e02712ddff..00000000000 --- a/library/utilities/wait_for +++ /dev/null @@ -1,462 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Jeroen Hoekx -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import socket -import datetime -import time -import sys -import re -import binascii - -HAS_PSUTIL = False -try: - import psutil - HAS_PSUTIL = True - # just because we can import it on Linux doesn't mean we will use it -except ImportError: - pass - -DOCUMENTATION = ''' ---- -module: wait_for -short_description: Waits for a condition before continuing. 
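A quick note on the set_fact boolean behaviour documented above: under key=value syntax every value arrives as a string, so the 1.8 change amounts to a small normalization step before the fact is stored. A Python 3 sketch of the idea; the accepted spellings here are illustrative rather than the exact list module_utils uses:

    TRUTHY = frozenset(['yes', 'true', 'on', '1'])
    FALSY = frozenset(['no', 'false', 'off', '0'])

    def normalize_bool(value):
        # Map boolean-looking strings onto real booleans; pass anything
        # else through unchanged.
        if isinstance(value, str):
            if value.lower() in TRUTHY:
                return True
            if value.lower() in FALSY:
                return False
        return value

    normalize_bool('True')   # -> True
    normalize_bool('no')     # -> False
    normalize_bool('maybe')  # -> 'maybe'

wait_for's own documentation continues below.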
-description: - - Waiting for a port to become available is useful for when services - are not immediately available after their init scripts return - - which is true of certain Java application servers. It is also - useful when starting guests with the M(virt) module and - needing to pause until they are ready. - - This module can also be used to wait for a regex match a string to be present in a file. - - In 1.6 and later, this module can - also be used to wait for a file to be available or absent on the - filesystem. - - In 1.8 and later, this module can also be used to wait for active - connections to be closed before continuing, useful if a node - is being rotated out of a load balancer pool. -version_added: "0.7" -options: - host: - description: - - hostname or IP address to wait for - required: false - default: "127.0.0.1" - aliases: [] - timeout: - description: - - maximum number of seconds to wait for - required: false - default: 300 - delay: - description: - - number of seconds to wait before starting to poll - required: false - default: 0 - port: - description: - - port number to poll - required: false - state: - description: - - either C(present), C(started), or C(stopped), C(absent), or C(drained) - - When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed, C(drained) will check for active connections - - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing, C(absent) will check that file is absent or removed - choices: [ "present", "started", "stopped", "absent", "drained" ] - default: "started" - path: - version_added: "1.4" - required: false - description: - - path to a file on the filesytem that must exist before continuing - search_regex: - version_added: "1.4" - required: false - description: - - Can be used to match a string in either a file or a socket connection. Defaults to a multiline regex. - exclude_hosts: - version_added: "1.8" - required: false - description: - - list of hosts or IPs to ignore when looking for active TCP connections for C(drained) state -notes: - - The ability to use search_regex with a port connection was added in 1.7. 
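For the default started state, everything in the option table above reduces to "poll a TCP connect until a deadline". A freestanding Python 3 sketch of that core loop; the defaults for host, timeout, connect_timeout, and delay mirror the option table, the helper name is illustrative, and the real module also covers files, regex matches, and connection draining:

    import socket
    import time

    def wait_for_port(port, host='127.0.0.1', timeout=300,
                      connect_timeout=5, delay=0):
        # state=started: succeed on the first successful TCP connect,
        # give up once the overall timeout elapses.
        time.sleep(delay)
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                socket.create_connection((host, port), connect_timeout).close()
                return True
            except OSError:
                time.sleep(1)  # the module polls on the same 1-second cadence
        return False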
-requirements: [] -author: Jeroen Hoekx, John Jarvis, Andrii Radyk -''' - -EXAMPLES = ''' - -# wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds -- wait_for: port=8000 delay=10 - -# wait 300 seconds for port 8000 of any IP to close active connections, don't start checking for 10 seconds -- wait_for: host=0.0.0.0 port=8000 delay=10 state=drained - -# wait 300 seconds for port 8000 of any IP to close active connections, ignoring connections for specified hosts -- wait_for: host=0.0.0.0 port=8000 state=drained exclude_hosts=10.2.1.2,10.2.1.3 - -# wait until the file /tmp/foo is present before continuing -- wait_for: path=/tmp/foo - -# wait until the string "completed" is in the file /tmp/foo before continuing -- wait_for: path=/tmp/foo search_regex=completed - -# wait until the lock file is removed -- wait_for: path=/var/lock/file.lock state=absent - -# wait until the process is finished and pid was destroyed -- wait_for: path=/proc/3466/status state=absent - -# Wait 300 seconds for port 22 to become open and contain "OpenSSH", don't start checking for 10 seconds -- local_action: wait_for port=22 host="{{ inventory_hostname }}" search_regex=OpenSSH delay=10 - -''' - -class TCPConnectionInfo(object): - """ - This is a generic TCP Connection Info strategy class that relies - on the psutil module, which is not ideal for targets, but necessary - for cross platform support. - - A subclass may wish to override some or all of these methods. - - _get_exclude_ips() - - get_active_connections() - - All subclasses MUST define platform and distribution (which may be None). - """ - platform = 'Generic' - distribution = None - - match_all_ips = { - socket.AF_INET: '0.0.0.0', - socket.AF_INET6: '::', - } - connection_states = { - '01': 'ESTABLISHED', - '02': 'SYN_SENT', - '03': 'SYN_RECV', - '04': 'FIN_WAIT1', - '05': 'FIN_WAIT2', - '06': 'TIME_WAIT', - } - - def __new__(cls, *args, **kwargs): - return load_platform_subclass(TCPConnectionInfo, args, kwargs) - - def __init__(self, module): - self.module = module - (self.family, self.ip) = _convert_host_to_ip(self.module.params['host']) - self.port = int(self.module.params['port']) - self.exclude_ips = self._get_exclude_ips() - if not HAS_PSUTIL: - module.fail_json(msg="psutil module required for wait_for") - - def _get_exclude_ips(self): - if self.module.params['exclude_hosts'] is None: - return [] - exclude_hosts = self.module.params['exclude_hosts'].split(',') - return [ _convert_host_to_hex(h)[1] for h in exclude_hosts ] - - def get_active_connections_count(self): - active_connections = 0 - for p in psutil.process_iter(): - connections = p.get_connections(kind='inet') - for conn in connections: - if conn.status not in self.connection_states.values(): - continue - (local_ip, local_port) = conn.local_address - if self.port == local_port and self.ip in [self.match_all_ips[self.family], local_ip]: - (remote_ip, remote_port) = conn.remote_address - if remote_ip not in self.exclude_ips: - active_connections += 1 - return active_connections - - -# =========================================== -# Subclass: Linux - -class LinuxTCPConnectionInfo(TCPConnectionInfo): - """ - This is a TCP Connection Info evaluation strategy class - that utilizes information from Linux's procfs. While less universal, - does allow Linux targets to not require an additional library. 
- """ - platform = 'Linux' - distribution = None - - source_file = { - socket.AF_INET: '/proc/net/tcp', - socket.AF_INET6: '/proc/net/tcp6' - } - match_all_ips = { - socket.AF_INET: '00000000', - socket.AF_INET6: '00000000000000000000000000000000', - } - local_address_field = 1 - remote_address_field = 2 - connection_state_field = 3 - - def __init__(self, module): - self.module = module - (self.family, self.ip) = _convert_host_to_hex(module.params['host']) - self.port = "%0.4X" % int(module.params['port']) - self.exclude_ips = self._get_exclude_ips() - - def _get_exclude_ips(self): - if self.module.params['exclude_hosts'] is None: - return [] - exclude_hosts = self.module.params['exclude_hosts'].split(',') - return [ _convert_host_to_hex(h) for h in exclude_hosts ] - - def get_active_connections_count(self): - active_connections = 0 - f = open(self.source_file[self.family]) - for tcp_connection in f.readlines(): - tcp_connection = tcp_connection.strip().split(' ') - if tcp_connection[self.local_address_field] == 'local_address': - continue - if tcp_connection[self.connection_state_field] not in self.connection_states: - continue - (local_ip, local_port) = tcp_connection[self.local_address_field].split(':') - if self.port == local_port and self.ip in [self.match_all_ips[self.family], local_ip]: - (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':') - if remote_ip not in self.exclude_ips: - active_connections += 1 - f.close() - return active_connections - - -def _convert_host_to_ip(host): - """ - Perform forward DNS resolution on host, IP will give the same IP - - Args: - host: String with either hostname, IPv4, or IPv6 address - - Returns: - Tuple containing address family and IP - """ - addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)[0] - return (addrinfo[0], addrinfo[4][0]) - -def _convert_host_to_hex(host): - """ - Convert the provided host to the format in /proc/net/tcp* - - /proc/net/tcp uses little-endian four byte hex for ipv4 - /proc/net/tcp6 uses little-endian per 4B word for ipv6 - - Args: - host: String with either hostname, IPv4, or IPv6 address - - Returns: - Tuple containing address family and the little-endian converted host - """ - (family, ip) = _convert_host_to_ip(host) - hexed = binascii.hexlify(socket.inet_pton(family, ip)).upper() - if family == socket.AF_INET: - hexed = _little_endian_convert_32bit(hexed) - elif family == socket.AF_INET6: - # xrange loops through each 8 character (4B) set in the 128bit total - hexed = "".join([ _little_endian_convert_32bit(hexed[x:x+8]) for x in xrange(0, 32, 8) ]) - return (family, hexed) - -def _little_endian_convert_32bit(block): - """ - Convert to little-endian, effectively transposing - the order of the four byte word - 12345678 -> 78563412 - - Args: - block: String containing a 4 byte hex representation - - Returns: - String containing the little-endian converted block - """ - # xrange starts at 6, and increments by -2 until it reaches -2 - # which lets us start at the end of the string block and work to the begining - return "".join([ block[x:x+2] for x in xrange(6, -2, -2) ]) - -def main(): - - module = AnsibleModule( - argument_spec = dict( - host=dict(default='127.0.0.1'), - timeout=dict(default=300), - connect_timeout=dict(default=5), - delay=dict(default=0), - port=dict(default=None), - path=dict(default=None), - search_regex=dict(default=None), - state=dict(default='started', choices=['started', 'stopped', 'present', 'absent', 'drained']), - exclude_hosts=dict(default=None, 
type='list') - ), - ) - - params = module.params - - host = params['host'] - timeout = int(params['timeout']) - connect_timeout = int(params['connect_timeout']) - delay = int(params['delay']) - if params['port']: - port = int(params['port']) - else: - port = None - state = params['state'] - path = params['path'] - search_regex = params['search_regex'] - if params['exclude_hosts']: - exclude_hosts = params['exclude_hosts'].split(',') - else: - exclude_hosts = [] - - if port and path: - module.fail_json(msg="port and path parameter can not both be passed to wait_for") - if path and state == 'stopped': - module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module") - if path and state == 'drained': - module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module") - if exclude_hosts and state != 'drained': - module.fail_json(msg="exclude_hosts should only be with state=drained") - - start = datetime.datetime.now() - - if delay: - time.sleep(delay) - - if state in [ 'stopped', 'absent' ]: - ### first wait for the stop condition - end = start + datetime.timedelta(seconds=timeout) - - while datetime.datetime.now() < end: - if path: - try: - f = open(path) - f.close() - time.sleep(1) - pass - except IOError: - break - elif port: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(connect_timeout) - try: - s.connect( (host, port) ) - s.shutdown(socket.SHUT_RDWR) - s.close() - time.sleep(1) - except: - break - else: - elapsed = datetime.datetime.now() - start - if port: - module.fail_json(msg="Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds) - elif path: - module.fail_json(msg="Timeout when waiting for %s to be absent." % (path), elapsed=elapsed.seconds) - - elif state in ['started', 'present']: - ### wait for start condition - end = start + datetime.timedelta(seconds=timeout) - while datetime.datetime.now() < end: - if path: - try: - os.stat(path) - if search_regex: - try: - f = open(path) - try: - if re.search(search_regex, f.read(), re.MULTILINE): - break - else: - time.sleep(1) - finally: - f.close() - except IOError: - time.sleep(1) - pass - else: - break - except OSError, e: - # File not present - if e.errno == 2: - time.sleep(1) - else: - elapsed = datetime.datetime.now() - start - module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds) - elif port: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(connect_timeout) - try: - s.connect( (host, port) ) - if search_regex: - data = '' - matched = False - while 1: - data += s.recv(1024) - if not data: - break - elif re.search(search_regex, data, re.MULTILINE): - matched = True - break - if matched: - s.shutdown(socket.SHUT_RDWR) - s.close() - break - else: - s.shutdown(socket.SHUT_RDWR) - s.close() - break - except: - time.sleep(1) - pass - else: - elapsed = datetime.datetime.now() - start - if port: - if search_regex: - module.fail_json(msg="Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds) - else: - module.fail_json(msg="Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds) - elif path: - if search_regex: - module.fail_json(msg="Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds) - else: - module.fail_json(msg="Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds) - - elif state == 'drained': - ### wait until all active 
connections are gone - end = start + datetime.timedelta(seconds=timeout) - tcpconns = TCPConnectionInfo(module) - while datetime.datetime.now() < end: - try: - if tcpconns.get_active_connections_count() == 0: - break - except IOError: - pass - time.sleep(1) - else: - elapsed = datetime.datetime.now() - start - module.fail_json(msg="Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds) - - elapsed = datetime.datetime.now() - start - module.exit_json(state=state, port=port, search_regex=search_regex, path=path, elapsed=elapsed.seconds) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/web_infrastructure/apache2_module b/library/web_infrastructure/apache2_module deleted file mode 100644 index 39351482087..00000000000 --- a/library/web_infrastructure/apache2_module +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013-2014, Christian Berendt -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -DOCUMENTATION = ''' ---- -module: apache2_module -version_added: 1.6 -short_description: enables/disables a module of the Apache2 webserver -description: - - Enables or disables a specified module of the Apache2 webserver. 
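A note on the wait_for module removed above: its drained check works by re-encoding the target address into the format the kernel uses in /proc/net/tcp and /proc/net/tcp6, namely the hexlified packed address with each 32-bit word byte-swapped. A minimal standalone sketch of that encoding, assuming a little-endian kernel as the module itself does (function names here are illustrative, not the module's):

    import binascii
    import socket

    def swap32(word):
        # Reverse the byte order of one 4-byte hex word: '7F000001' -> '0100007F',
        # the same walk as the module's xrange(6, -2, -2) loop.
        return "".join(word[i:i + 2] for i in range(6, -2, -2))

    def proc_net_tcp_entry(ip, port):
        # /proc/net/tcp{,6} prints 'ADDR:PORT' with the address byte-swapped per
        # 32-bit word and the port as four uppercase hex digits.
        family = socket.AF_INET6 if ':' in ip else socket.AF_INET
        hexed = binascii.hexlify(socket.inet_pton(family, ip)).decode().upper()
        swapped = "".join(swap32(hexed[x:x + 8]) for x in range(0, len(hexed), 8))
        return "%s:%04X" % (swapped, port)

    print(proc_net_tcp_entry('127.0.0.1', 8000))  # -> 0100007F:1F40

Scanning the local_address column of /proc/net/tcp for this string (or for the all-zero wildcard address with the same port) is how get_active_connections_count above recognizes its listener.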
-options: - name: - description: - - name of the module to enable/disable - required: true - state: - description: - - indicate the desired state of the resource - choices: ['present', 'absent'] - default: present - -''' - -EXAMPLES = ''' -# enables the Apache2 module "wsgi" -- apache2_module: state=present name=wsgi - -# disables the Apache2 module "wsgi" -- apache2_module: state=absent name=wsgi -''' - -import re - -def _disable_module(module): - name = module.params['name'] - a2dismod_binary = module.get_bin_path("a2dismod") - result, stdout, stderr = module.run_command("%s %s" % (a2dismod_binary, name)) - - if re.match(r'.*' + name + r' already disabled.*', stdout, re.S): - module.exit_json(changed = False, result = "Success") - elif result != 0: - module.fail_json(msg="Failed to disable module %s: %s" % (name, stdout)) - else: - module.exit_json(changed = True, result = "Disabled") - -def _enable_module(module): - name = module.params['name'] - a2enmod_binary = module.get_bin_path("a2enmod") - result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name)) - - if re.match(r'.*' + name + r' already enabled.*', stdout, re.S): - module.exit_json(changed = False, result = "Success") - elif result != 0: - module.fail_json(msg="Failed to enable module %s: %s" % (name, stdout)) - else: - module.exit_json(changed = True, result = "Enabled") - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - state = dict(default='present', choices=['absent', 'present']) - ), - ) - - if module.params['state'] == 'present': - _enable_module(module) - - if module.params['state'] == 'absent': - _disable_module(module) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/web_infrastructure/django_manage b/library/web_infrastructure/django_manage deleted file mode 100644 index 580cc63c2dd..00000000000 --- a/library/web_infrastructure/django_manage +++ /dev/null @@ -1,281 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Scott Anderson -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: django_manage -short_description: Manages a Django application. -description: - - Manages a Django application using the I(manage.py) application frontend to I(django-admin). With the I(virtualenv) parameter, all management commands will be executed by the given I(virtualenv) installation. -version_added: "1.1" -options: - command: - choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ] - description: - - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate. Other commands can be entered, but will fail if they're unknown to Django. 
- required: true - app_path: - description: - - The path to the root of the Django application where B(manage.py) lives. - required: true - settings: - description: - - The Python path to the application's settings module, such as 'myapp.settings'. - required: false - pythonpath: - description: - - A directory to add to the Python path. Typically used to include the settings module if it is located external to the application directory. - required: false - virtualenv: - description: - - An optional path to a I(virtualenv) installation to use while running the manage application. - required: false - apps: - description: - - A list of space-delimited apps to target. Used by the 'test' command. - required: false - cache_table: - description: - - The name of the table used for database-backed caching. Used by the 'createcachetable' command. - required: false - database: - description: - - The database to target. Used by the 'createcachetable', 'flush', 'loaddata', and 'syncdb' commands. - required: false - failfast: - description: - - Fail the command immediately if a test fails. Used by the 'test' command. - required: false - default: "no" - choices: [ "yes", "no" ] - fixtures: - description: - - A space-delimited list of fixture file names to load in the database. B(Required) by the 'loaddata' command. - required: false - skip: - description: - - Will skip over out-of-order missing migrations, you can only use this parameter with I(migrate) - required: false - version_added: "1.3" - merge: - description: - - Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with 'migrate' command - required: false - version_added: "1.3" - link: - description: - - Will create links to the files instead of copying them, you can only use this parameter with 'collectstatic' command - required: false - version_added: "1.3" -notes: - - I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified. - - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location. - - This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately. - - To be able to use the migrate command, you must have south installed and added as an app in your settings - - To be able to use the collectstatic command, you must have enabled staticfiles in your settings -requirements: [ "virtualenv", "django" ] -author: Scott Anderson -''' - -EXAMPLES = """ -# Run cleanup on the application installed in 'django_dir'. -- django_manage: command=cleanup app_path={{ django_dir }} - -# Load the initial_data fixture into the application -- django_manage: command=loaddata app_path={{ django_dir }} fixtures={{ initial_data }} - -#Run syncdb on the application -- django_manage: > - command=syncdb - app_path={{ django_dir }} - settings={{ settings_app_name }} - pythonpath={{ settings_dir }} - virtualenv={{ virtualenv_dir }} - -#Run the SmokeTest test case from the main app. Useful for testing deploys. 
-- django_manage: command=test app_path=django_dir apps=main.SmokeTest -""" - - -import os - -def _fail(module, cmd, out, err, **kwargs): - msg = '' - if out: - msg += "stdout: %s" % (out, ) - if err: - msg += "\n:stderr: %s" % (err, ) - module.fail_json(cmd=cmd, msg=msg, **kwargs) - - -def _ensure_virtualenv(module): - - venv_param = module.params['virtualenv'] - if venv_param is None: - return - - vbin = os.path.join(os.path.expanduser(venv_param), 'bin') - activate = os.path.join(vbin, 'activate') - - if not os.path.exists(activate): - virtualenv = module.get_bin_path('virtualenv', True) - vcmd = '%s %s' % (virtualenv, venv_param) - vcmd = [virtualenv, venv_param] - rc, out_venv, err_venv = module.run_command(vcmd) - if rc != 0: - _fail(module, vcmd, out_venv, err_venv) - - os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"]) - os.environ["VIRTUAL_ENV"] = venv_param - -def createcachetable_filter_output(line): - return "Already exists" not in line - -def flush_filter_output(line): - return "Installed" in line and "Installed 0 object" not in line - -def loaddata_filter_output(line): - return "Installed" in line and "Installed 0 object" not in line - -def syncdb_filter_output(line): - return ("Creating table " in line) or ("Installed" in line and "Installed 0 object" not in line) - -def migrate_filter_output(line): - return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) - -def main(): - command_allowed_param_map = dict( - cleanup=(), - createcachetable=('cache_table', 'database', ), - flush=('database', ), - loaddata=('database', 'fixtures', ), - syncdb=('database', ), - test=('failfast', 'testrunner', 'liveserver', 'apps', ), - validate=(), - migrate=('apps', 'skip', 'merge'), - collectstatic=('link', ), - ) - - command_required_param_map = dict( - loaddata=('fixtures', ), - createcachetable=('cache_table', ), - ) - - # forces --noinput on every command that needs it - noinput_commands = ( - 'flush', - 'syncdb', - 'migrate', - 'test', - 'collectstatic', - ) - - # These params are allowed for certain commands only - specific_params = ('apps', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner') - - # These params are automatically added to the command if present - general_params = ('settings', 'pythonpath', 'database',) - specific_boolean_params = ('failfast', 'skip', 'merge', 'link') - end_of_command_params = ('apps', 'cache_table', 'fixtures') - - module = AnsibleModule( - argument_spec=dict( - command = dict(default=None, required=True), - app_path = dict(default=None, required=True), - settings = dict(default=None, required=False), - pythonpath = dict(default=None, required=False, aliases=['python_path']), - virtualenv = dict(default=None, required=False, aliases=['virtual_env']), - - apps = dict(default=None, required=False), - cache_table = dict(default=None, required=False), - database = dict(default=None, required=False), - failfast = dict(default='no', required=False, type='bool', aliases=['fail_fast']), - fixtures = dict(default=None, required=False), - liveserver = dict(default=None, required=False, aliases=['live_server']), - testrunner = dict(default=None, required=False, aliases=['test_runner']), - skip = dict(default=None, required=False, type='bool'), - merge = dict(default=None, required=False, type='bool'), - link = dict(default=None, required=False, type='bool'), - ), - ) - - command = module.params['command'] - app_path = module.params['app_path'] - virtualenv = module.params['virtualenv'] - - for param in 
specific_params: - value = module.params[param] - if param in specific_boolean_params: - value = module.boolean(value) - if value and param not in command_allowed_param_map[command]: - module.fail_json(msg='%s param is incompatible with command=%s' % (param, command)) - - for param in command_required_param_map.get(command, ()): - if not module.params[param]: - module.fail_json(msg='%s param is required for command=%s' % (param, command)) - - venv = module.params['virtualenv'] - - _ensure_virtualenv(module) - - cmd = "python manage.py %s" % (command, ) - - if command in noinput_commands: - cmd = '%s --noinput' % cmd - - for param in general_params: - if module.params[param]: - cmd = '%s --%s=%s' % (cmd, param, module.params[param]) - - for param in specific_boolean_params: - if module.boolean(module.params[param]): - cmd = '%s --%s' % (cmd, param) - - # these params always get tacked on the end of the command - for param in end_of_command_params: - if module.params[param]: - cmd = '%s %s' % (cmd, module.params[param]) - - rc, out, err = module.run_command(cmd, cwd=app_path) - if rc != 0: - if command == 'createcachetable' and 'table' in err and 'already exists' in err: - out = 'Already exists.' - else: - if "Unknown command:" in err: - _fail(module, cmd, err, "Unknown django command: %s" % command) - _fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path) - - changed = False - - lines = out.split('\n') - filt = globals().get(command + "_filter_output", None) - if filt: - filtered_output = filter(filt, out.split('\n')) - if len(filtered_output): - changed = filtered_output - - module.exit_json(changed=changed, out=out, cmd=cmd, app_path=app_path, virtualenv=virtualenv, - settings=module.params['settings'], pythonpath=module.params['pythonpath']) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/web_infrastructure/ejabberd_user b/library/web_infrastructure/ejabberd_user deleted file mode 100755 index d8b0384679c..00000000000 --- a/library/web_infrastructure/ejabberd_user +++ /dev/null @@ -1,214 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2013, Peter Sprygada -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
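The django_manage module removed above drives all of its input checking from two lookup tables: command_allowed_param_map lists what each management command may receive, and command_required_param_map lists what it cannot run without. A small sketch of that table-driven validation, with a trimmed-down table (the raise calls stand in for module.fail_json):

    # Trimmed-down versions of the module's two tables.
    ALLOWED = dict(
        loaddata=('database', 'fixtures'),
        test=('failfast', 'apps'),
        cleanup=(),
    )
    REQUIRED = dict(
        loaddata=('fixtures',),
    )

    def validate(command, params):
        # Reject parameters the command does not accept...
        for name, value in params.items():
            if value and name not in ALLOWED.get(command, ()):
                raise ValueError('%s param is incompatible with command=%s' % (name, command))
        # ...then insist on the ones it cannot run without.
        for name in REQUIRED.get(command, ()):
            if not params.get(name):
                raise ValueError('%s param is required for command=%s' % (name, command))

    validate('loaddata', {'fixtures': 'initial_data.json'})  # passes
    # validate('loaddata', {}) raises: fixtures param is required for command=loaddata

Keeping the rules in data rather than in per-command branches is what lets the module cover ten commands with a dozen lines of checking.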
-# -DOCUMENTATION = ''' ---- -module: ejabberd_user -version_added: "1.5" -author: Peter Sprygada -short_description: Manages users for ejabberd servers -requirements: - - ejabberd with mod_admin_extra -description: - - This module provides user management for ejabberd servers -options: - username: - description: - - the name of the user to manage - required: true - host: - description: - - the ejabberd host associated with this username - required: true - password: - description: - - the password to assign to the username - required: false - logging: - description: - - enables or disables the local syslog facility for this module - required: false - default: false - choices: [ 'true', 'false', 'yes', 'no' ] - state: - description: - - the desired state of the user to be managed - required: false - default: 'present' - choices: [ 'present', 'absent' ] -notes: - - Password parameter is required for state == present only - - Passwords must be stored in clear text for this release - - The ejabberd configuration file must include mod_admin_extra as a module. -''' -EXAMPLES = ''' -Example playbook entries using the ejabberd_user module to manage user state. - - tasks: - - - name: create a user if it does not exist - action: ejabberd_user username=test host=server password=password - - - name: delete a user if it exists - action: ejabberd_user username=test host=server state=absent -''' -import syslog - -class EjabberdUserException(Exception): - """ Base exception for the EjabberdUser class """ - pass - -class EjabberdUser(object): - """ This object represents a user resource for an ejabberd server. The - object manages user creation and deletion using ejabberdctl. The following - commands are currently supported: - * ejabberdctl register - * ejabberdctl deregister - """ - - def __init__(self, module): - self.module = module - self.logging = module.params.get('logging') - self.state = module.params.get('state') - self.host = module.params.get('host') - self.user = module.params.get('username') - self.pwd = module.params.get('password') - - @property - def changed(self): - """ This method will check the current user and see if the password has - changed. It will return True if the user's password does not match the - supplied credentials and False if it does - """ - try: - options = [self.user, self.host, self.pwd] - (rc, out, err) = self.run_command('check_password', options) - except EjabberdUserException, e: - (rc, out, err) = (1, None, "required attribute(s) missing") - return rc - - @property - def exists(self): - """ This method will check to see if the supplied username exists for - host specified.
If the user exists True is returned, otherwise False - is returned - """ - try: - options = [self.user, self.host] - (rc, out, err) = self.run_command('check_account', options) - except EjabberdUserException, e: - (rc, out, err) = (1, None, "required attribute(s) missing") - return True if rc == 0 else False - - def log(self, entry): - """ This method will log information to the local syslog facility """ - if self.logging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, entry) - - def run_command(self, cmd, options): - """ This method will run the specified command and return the - result using the Ansible common module - """ - if not all(options): - raise EjabberdUserException - - cmd = 'ejabberdctl %s ' % cmd - cmd += " ".join(options) - self.log('command: %s' % cmd) - return self.module.run_command(cmd.split()) - - def update(self): - """ The update method will update the credentials for the user provided - """ - try: - options = [self.user, self.host, self.pwd] - (rc, out, err) = self.run_command('change_password', options) - except EjabberdUserException, e: - (rc, out, err) = (1, None, "required attribute(s) missing") - return (rc, out, err) - - def create(self): - """ The create method will create a new user on the host with the - password provided - """ - try: - options = [self.user, self.host, self.pwd] - (rc, out, err) = self.run_command('register', options) - except EjabberdUserException, e: - (rc, out, err) = (1, None, "required attribute(s) missing") - return (rc, out, err) - - def delete(self): - """ The delete method will delete the user from the host - """ - try: - options = [self.user, self.host] - (rc, out, err) = self.run_command('unregister', options) - except EjabberdUserException, e: - (rc, out, err) = (1, None, "required attribute(s) missing") - return (rc, out, err) - -def main(): - module = AnsibleModule( - argument_spec = dict( - host=dict(default=None, type='str'), - username=dict(default=None, type='str'), - password=dict(default=None, type='str'), - state=dict(default='present', choices=['present', 'absent']), - logging=dict(default=False, type='bool') - ), - supports_check_mode = True - ) - - obj = EjabberdUser(module) - - rc = None - result = dict() - - if obj.state == 'absent': - if obj.exists: - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = obj.delete() - if rc != 0: - module.fail_json(msg=err, rc=rc) - - elif obj.state == 'present': - if not obj.exists: - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = obj.create() - elif obj.changed: - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = obj.update() - if rc is not None and rc != 0: - module.fail_json(msg=err, rc=rc) - - if rc is None: - result['changed'] = False - else: - result['changed'] = True - - module.exit_json(**result) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/web_infrastructure/htpasswd b/library/web_infrastructure/htpasswd deleted file mode 100644 index 4a72ea37fec..00000000000 --- a/library/web_infrastructure/htpasswd +++ /dev/null @@ -1,219 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Nimbis Services, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version.
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -DOCUMENTATION = """ -module: htpasswd -version_added: "1.3" -short_description: manage user files for basic authentication -description: - - Add and remove username/password entries in a password file using htpasswd. - - This is used by web servers such as Apache and Nginx for basic authentication. -options: - path: - required: true - aliases: [ dest, destfile ] - description: - - Path to the file that contains the usernames and passwords - name: - required: true - aliases: [ username ] - description: - - User name to add or remove - password: - required: false - description: - - Password associated with user. - - Must be specified if user does not exist yet. - crypt_scheme: - required: false - choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] - default: "apr_md5_crypt" - description: - - Encryption scheme to be used. - state: - required: false - choices: [ present, absent ] - default: "present" - description: - - Whether the user entry should be present or not - create: - required: false - choices: [ "yes", "no" ] - default: "yes" - description: - - Used with C(state=present). If specified, the file will be created - if it does not already exist. If set to "no", will fail if the - file does not exist -notes: - - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems." - - "On Debian, Ubuntu, or Fedora: install I(python-passlib)." - - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)." 
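One implementation detail worth calling out before the module body below: passlib 1.6 renamed the HtpasswdFile constructor arguments (default became default_scheme, autoload became new) and replaced update() with set_password(), so the code that follows branches on the installed version. A condensed sketch of that shim, assuming passlib is installed:

    from distutils.version import StrictVersion

    import passlib
    from passlib.apache import HtpasswdFile

    def open_htpasswd(path, scheme='apr_md5_crypt'):
        # passlib >= 1.6 uses default_scheme/new; older releases use default.
        if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
            return HtpasswdFile(path, new=False, default_scheme=scheme)
        return HtpasswdFile(path, default=scheme)

    def set_password(ht, username, password):
        # set_password() replaced update() in passlib 1.6.
        if hasattr(ht, 'set_password'):
            ht.set_password(username, password)
        else:
            ht.update(username, password)
        ht.save()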
-requires: [ passlib>=1.6 ] -author: Lorin Hochstein -""" - -EXAMPLES = """ -# Add a user to a password file and ensure permissions are set -- htpasswd: path=/etc/nginx/passwdfile name=janedoe password=9s36?;fyNp owner=root group=www-data mode=0640 -# Remove a user from a password file -- htpasswd: path=/etc/apache2/passwdfile name=foobar state=absent -""" - - -import os -from distutils.version import StrictVersion - -try: - from passlib.apache import HtpasswdFile - import passlib -except ImportError: - passlib_installed = False -else: - passlib_installed = True - - -def create_missing_directories(dest): - destpath = os.path.dirname(dest) - if not os.path.exists(destpath): - os.makedirs(destpath) - - -def present(dest, username, password, crypt_scheme, create, check_mode): - """ Ensures user is present - - Returns (msg, changed) """ - if not os.path.exists(dest): - if not create: - raise ValueError('Destination %s does not exist' % dest) - if check_mode: - return ("Create %s" % dest, True) - create_missing_directories(dest) - if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): - ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme) - else: - ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme) - if getattr(ht, 'set_password', None): - ht.set_password(username, password) - else: - ht.update(username, password) - ht.save() - return ("Created %s and added %s" % (dest, username), True) - else: - if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): - ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme) - else: - ht = HtpasswdFile(dest, default=crypt_scheme) - - found = None - if getattr(ht, 'check_password', None): - found = ht.check_password(username, password) - else: - found = ht.verify(username, password) - - if found: - return ("%s already present" % username, False) - else: - if not check_mode: - if getattr(ht, 'set_password', None): - ht.set_password(username, password) - else: - ht.update(username, password) - ht.save() - return ("Add/update %s" % username, True) - - -def absent(dest, username, check_mode): - """ Ensures user is absent - - Returns (msg, changed) """ - if not os.path.exists(dest): - raise ValueError("%s does not exists" % dest) - - if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): - ht = HtpasswdFile(dest, new=False) - else: - ht = HtpasswdFile(dest) - - if username not in ht.users(): - return ("%s not present" % username, False) - else: - if not check_mode: - ht.delete(username) - ht.save() - return ("Remove %s" % username, True) - - -def check_file_attrs(module, changed, message): - - file_args = module.load_file_common_arguments(module.params) - if module.set_fs_attributes_if_different(file_args, False): - - if changed: - message += " and " - changed = True - message += "ownership, perms or SE linux context changed" - - return message, changed - - -def main(): - arg_spec = dict( - path=dict(required=True, aliases=["dest", "destfile"]), - name=dict(required=True, aliases=["username"]), - password=dict(required=False, default=None), - crypt_scheme=dict(required=False, default=None), - state=dict(required=False, default="present"), - create=dict(type='bool', default='yes'), - - ) - module = AnsibleModule(argument_spec=arg_spec, - add_file_common_args=True, - supports_check_mode=True) - - path = module.params['path'] - username = module.params['name'] - password = module.params['password'] - crypt_scheme = module.params['crypt_scheme'] - state = module.params['state'] - create = module.params['create'] - 
check_mode = module.check_mode - - if not passlib_installed: - module.fail_json(msg="This module requires the passlib Python library") - - try: - if state == 'present': - (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode) - elif state == 'absent': - (msg, changed) = absent(path, username, check_mode) - else: - module.fail_json(msg="Invalid state: %s" % state) - - check_file_attrs(module, changed, msg) - module.exit_json(msg=msg, changed=changed) - except Exception, e: - module.fail_json(msg=str(e)) - - -# import module snippets -from ansible.module_utils.basic import * - -if __name__ == '__main__': - main() diff --git a/library/web_infrastructure/jboss b/library/web_infrastructure/jboss deleted file mode 100644 index 9478235698c..00000000000 --- a/library/web_infrastructure/jboss +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Jeroen Hoekx -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ -module: jboss -version_added: "1.4" -short_description: deploy applications to JBoss -description: - - Deploy applications to JBoss standalone using the filesystem -options: - deployment: - required: true - description: - - The name of the deployment - src: - required: false - description: - - The remote path of the application ear or war to deploy - deploy_path: - required: false - default: /var/lib/jbossas/standalone/deployments - description: - - The location in the filesystem where the deployment scanner listens - state: - required: false - choices: [ present, absent ] - default: "present" - description: - - Whether the application should be deployed or undeployed -notes: - - "The JBoss standalone deployment-scanner has to be enabled in standalone.xml" - - "Ensure no identically named application is deployed through the JBoss CLI" -author: Jeroen Hoekx -""" - -EXAMPLES = """ -# Deploy a hello world application -- jboss: src=/tmp/hello-1.0-SNAPSHOT.war deployment=hello.war state=present -# Update the hello world application -- jboss: src=/tmp/hello-1.1-SNAPSHOT.war deployment=hello.war state=present -# Undeploy the hello world application -- jboss: deployment=hello.war state=absent -""" - -import os -import shutil -import time - -def is_deployed(deploy_path, deployment): - return os.path.exists(os.path.join(deploy_path, "%s.deployed"%(deployment))) - -def is_undeployed(deploy_path, deployment): - return os.path.exists(os.path.join(deploy_path, "%s.undeployed"%(deployment))) - -def is_failed(deploy_path, deployment): - return os.path.exists(os.path.join(deploy_path, "%s.failed"%(deployment))) - -def main(): - module = AnsibleModule( - argument_spec = dict( - src=dict(), - deployment=dict(required=True), - deploy_path=dict(default='/var/lib/jbossas/standalone/deployments'), - state=dict(choices=['absent', 'present'], default='present'), - ), - ) - - changed = False - - src = module.params['src'] - deployment = 
module.params['deployment'] - deploy_path = module.params['deploy_path'] - state = module.params['state'] - - if state == 'present' and not src: - module.fail_json(msg="Argument 'src' required.") - - if not os.path.exists(deploy_path): - module.fail_json(msg="deploy_path does not exist.") - - deployed = is_deployed(deploy_path, deployment) - - if state == 'present' and not deployed: - if not os.path.exists(src): - module.fail_json(msg='Source file %s does not exist.'%(src)) - if is_failed(deploy_path, deployment): - ### Clean up old failed deployment - os.remove(os.path.join(deploy_path, "%s.failed"%(deployment))) - - shutil.copyfile(src, os.path.join(deploy_path, deployment)) - while not deployed: - deployed = is_deployed(deploy_path, deployment) - if is_failed(deploy_path, deployment): - module.fail_json(msg='Deploying %s failed.'%(deployment)) - time.sleep(1) - changed = True - - if state == 'present' and deployed: - if module.md5(src) != module.md5(os.path.join(deploy_path, deployment)): - os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment))) - shutil.copyfile(src, os.path.join(deploy_path, deployment)) - deployed = False - while not deployed: - deployed = is_deployed(deploy_path, deployment) - if is_failed(deploy_path, deployment): - module.fail_json(msg='Deploying %s failed.'%(deployment)) - time.sleep(1) - changed = True - - if state == 'absent' and deployed: - os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment))) - while deployed: - deployed = not is_undeployed(deploy_path, deployment) - if is_failed(deploy_path, deployment): - module.fail_json(msg='Undeploying %s failed.'%(deployment)) - time.sleep(1) - changed = True - - module.exit_json(changed=changed) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/web_infrastructure/jira b/library/web_infrastructure/jira deleted file mode 100644 index 950fc3dbfcf..00000000000 --- a/library/web_infrastructure/jira +++ /dev/null @@ -1,347 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Steve Smith -# Atlassian open-source approval reference OSR-76. -# -# This file is part of Ansible. -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = """ -module: jira -version_added: "1.6" -short_description: create and modify issues in a JIRA instance -description: - - Create and modify issues in a JIRA instance. - -options: - uri: - required: true - description: - - Base URI for the JIRA instance - - operation: - required: true - aliases: [ command ] - choices: [ create, comment, edit, fetch, transition ] - description: - - The operation to perform. - - username: - required: true - description: - - The username to log-in with. - - password: - required: true - description: - - The password to log-in with. - - project: - aliases: [ prj ] - required: false - description: - - The project for this operation. Required for issue creation. 
- - summary: - required: false - description: - - The issue summary, where appropriate. - - description: - required: false - description: - - The issue description, where appropriate. - - issuetype: - required: false - description: - - The issue type, for issue creation. - - issue: - required: false - description: - - An existing issue key to operate on. - - comment: - required: false - description: - - The comment text to add. - - status: - required: false - description: - - The desired status; only relevant for the transition operation. - - assignee: - required: false - description: - - Sets the assignee on create or transition operations. Note not all transitions will allow this. - - fields: - required: false - description: - - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API (possibly after merging with other required data, as when passed to create). See examples for more information, and the JIRA REST API for the structure required for various fields. - -notes: - - "Currently this only works with basic-auth." - -author: Steve Smith -""" - -EXAMPLES = """ -# Create a new issue and add a comment to it: -- name: Create an issue - jira: uri={{server}} username={{user}} password={{pass}} - project=ANS operation=create - summary="Example Issue" description="Created using Ansible" issuetype=Task - register: issue - -- name: Comment on issue - jira: uri={{server}} username={{user}} password={{pass}} - issue={{issue.meta.key}} operation=comment - comment="A comment added by Ansible" - -# Assign an existing issue using edit -- name: Assign an issue using free-form fields - jira: uri={{server}} username={{user}} password={{pass}} - issue={{issue.meta.key}} operation=edit - assignee=ssmith - -# Create an issue with an existing assignee -- name: Create an assigned issue - jira: uri={{server}} username={{user}} password={{pass}} - project=ANS operation=create - summary="Assigned issue" description="Created and assigned using Ansible" - issuetype=Task assignee=ssmith - -# Edit an issue using free-form fields -- name: Set the labels on an issue using free-form fields - jira: uri={{server}} username={{user}} password={{pass}} - issue={{issue.meta.key}} operation=edit - args: { fields: {labels: ["autocreated", "ansible"]}} - -- name: Set the labels on an issue, YAML version - jira: uri={{server}} username={{user}} password={{pass}} - issue={{issue.meta.key}} operation=edit - args: - fields: - labels: - - "autocreated" - - "ansible" - - "yaml" - -# Retrieve metadata for an issue and use it to create an account -- name: Get an issue - jira: uri={{server}} username={{user}} password={{pass}} - project=ANS operation=fetch issue="ANS-63" - register: issue - -- name: Create a unix account for the reporter - sudo: true - user: name="{{issue.meta.fields.creator.name}}" comment="{{issue.meta.fields.creator.displayName}}" - -# Transition an issue by target status -- name: Close the issue - jira: uri={{server}} username={{user}} password={{pass}} - issue={{issue.meta.key}} operation=transition status="Done" -""" - -import json -import base64 - -def request(url, user, passwd, data=None, method=None): - if data: - data = json.dumps(data) - - # NOTE: fetch_url uses a password manager, which follows the - # standard request-then-challenge basic-auth semantics. However as - # JIRA allows some unauthorised operations it doesn't necessarily - # send the challenge, so the request occurs as the anonymous user, - # resulting in unexpected results. 
To work around this we manually - # inject the basic-auth header up-front to ensure that JIRA treats - # the requests as authorized for this user. - auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '') - response, info = fetch_url(module, url, data=data, method=method, - headers={'Content-Type':'application/json', - 'Authorization':"Basic %s" % auth}) - - if info['status'] not in (200, 204): - module.fail_json(msg=info['msg']) - - body = response.read() - - if body: - return json.loads(body) - else: - return {} - -def post(url, user, passwd, data): - return request(url, user, passwd, data=data, method='POST') - -def put(url, user, passwd, data): - return request(url, user, passwd, data=data, method='PUT') - -def get(url, user, passwd): - return request(url, user, passwd) - - -def create(restbase, user, passwd, params): - createfields = { - 'project': { 'key': params['project'] }, - 'summary': params['summary'], - 'description': params['description'], - 'issuetype': { 'name': params['issuetype'] }} - - # Merge in any additional or overridden fields - if params['fields']: - createfields.update(params['fields']) - - data = {'fields': createfields} - - url = restbase + '/issue/' - - ret = post(url, user, passwd, data) - - return ret - - -def comment(restbase, user, passwd, params): - data = { - 'body': params['comment'] - } - - url = restbase + '/issue/' + params['issue'] + '/comment' - - ret = post(url, user, passwd, data) - - return ret - - -def edit(restbase, user, passwd, params): - data = { - 'fields': params['fields'] - } - - url = restbase + '/issue/' + params['issue'] - - ret = put(url, user, passwd, data) - - return ret - - -def fetch(restbase, user, passwd, params): - url = restbase + '/issue/' + params['issue'] - ret = get(url, user, passwd) - return ret - - -def transition(restbase, user, passwd, params): - # Find the transition id - turl = restbase + '/issue/' + params['issue'] + "/transitions" - tmeta = get(turl, user, passwd) - - target = params['status'] - tid = None - for t in tmeta['transitions']: - if t['name'] == target: - tid = t['id'] - break - - if not tid: - raise ValueError("Failed find valid transition for '%s'" % target) - - # Perform it - url = restbase + '/issue/' + params['issue'] + "/transitions" - data = { 'transition': { "id" : tid }, - 'fields': params['fields']} - - ret = post(url, user, passwd, data) - - return ret - - -# Some parameters are required depending on the operation: -OP_REQUIRED = dict(create=['project', 'issuetype', 'summary', 'description'], - comment=['issue', 'comment'], - edit=[], - fetch=['issue'], - transition=['status']) - -def main(): - - global module - module = AnsibleModule( - argument_spec=dict( - uri=dict(required=True), - operation=dict(choices=['create', 'comment', 'edit', 'fetch', 'transition'], - aliases=['command'], required=True), - username=dict(required=True), - password=dict(required=True), - project=dict(), - summary=dict(), - description=dict(), - issuetype=dict(), - issue=dict(aliases=['ticket']), - comment=dict(), - status=dict(), - assignee=dict(), - fields=dict(default={}) - ), - supports_check_mode=False - ) - - op = module.params['operation'] - - # Check we have the necessary per-operation parameters - missing = [] - for parm in OP_REQUIRED[op]: - if not module.params[parm]: - missing.append(parm) - if missing: - module.fail_json(msg="Operation %s require the following missing parameters: %s" % (op, ",".join(missing))) - - # Handle rest of parameters - uri = module.params['uri'] - user = 
module.params['username'] - passwd = module.params['password'] - if module.params['assignee']: - module.params['fields']['assignee'] = { 'name': module.params['assignee'] } - - if not uri.endswith('/'): - uri = uri+'/' - restbase = uri + 'rest/api/2' - - # Dispatch - try: - - # Lookup the corresponding method for this operation. This is - # safe as the AnsibleModule should remove any unknown operations. - thismod = sys.modules[__name__] - method = getattr(thismod, op) - - ret = method(restbase, user, passwd, module.params) - - except Exception as e: - return module.fail_json(msg=e.message) - - - module.exit_json(changed=True, meta=ret) - - -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() diff --git a/library/web_infrastructure/supervisorctl b/library/web_infrastructure/supervisorctl deleted file mode 100644 index 2d458169e76..00000000000 --- a/library/web_infrastructure/supervisorctl +++ /dev/null @@ -1,221 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Matt Wright -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -import os - -DOCUMENTATION = ''' ---- -module: supervisorctl -short_description: Manage the state of a program or group of programs running via supervisord -description: - - Manage the state of a program or group of programs running via supervisord -version_added: "0.7" -options: - name: - description: - - The name of the supervisord program or group to manage. - - The name will be taken as group name when it ends with a colon I(:) - - Group support is only available in Ansible version 1.6 or later. - required: true - default: null - config: - description: - - The supervisor configuration file path - required: false - default: null - version_added: "1.3" - server_url: - description: - - URL on which supervisord server is listening - required: false - default: null - version_added: "1.3" - username: - description: - - username to use for authentication - required: false - default: null - version_added: "1.3" - password: - description: - - password to use for authentication - required: false - default: null - version_added: "1.3" - state: - description: - - The desired state of program/group. - required: true - default: null - choices: [ "present", "started", "stopped", "restarted" ] - supervisorctl_path: - description: - - path to supervisorctl executable - required: false - default: null - version_added: "1.4" -notes: - - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist. - - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart). -requirements: [ "supervisorctl" ] -author: Matt Wright, Aaron Wang -''' - -EXAMPLES = ''' -# Manage the state of program to be in 'started' state. -- supervisorctl: name=my_app state=started - -# Manage the state of program group to be in 'started' state. 
-- supervisorctl: name='my_apps:' state=started - -# Restart my_app, reading supervisorctl configuration from a specified file. -- supervisorctl: name=my_app state=restarted config=/var/opt/my_project/supervisord.conf - -# Restart my_app, connecting to supervisord with credentials and server URL. -- supervisorctl: name=my_app state=restarted username=test password=testpass server_url=http://localhost:9001 -''' - - -def main(): - arg_spec = dict( - name=dict(required=True), - config=dict(required=False), - server_url=dict(required=False), - username=dict(required=False), - password=dict(required=False), - supervisorctl_path=dict(required=False), - state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped']) - ) - - module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) - - name = module.params['name'] - is_group = False - if name.endswith(':'): - is_group = True - name = name.rstrip(':') - state = module.params['state'] - config = module.params.get('config') - server_url = module.params.get('server_url') - username = module.params.get('username') - password = module.params.get('password') - supervisorctl_path = module.params.get('supervisorctl_path') - - if supervisorctl_path: - supervisorctl_path = os.path.expanduser(supervisorctl_path) - if os.path.exists(supervisorctl_path) and module.is_executable(supervisorctl_path): - supervisorctl_args = [supervisorctl_path] - else: - module.fail_json( - msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path) - else: - supervisorctl_args = [module.get_bin_path('supervisorctl', True)] - - if config: - supervisorctl_args.extend(['-c', os.path.expanduser(config)]) - if server_url: - supervisorctl_args.extend(['-s', server_url]) - if username: - supervisorctl_args.extend(['-u', username]) - if password: - supervisorctl_args.extend(['-p', password]) - - def run_supervisorctl(cmd, name=None, **kwargs): - args = list(supervisorctl_args) # copy the master args - args.append(cmd) - if name: - args.append(name) - return module.run_command(args, **kwargs) - - def get_matched_processes(): - matched = [] - rc, out, err = run_supervisorctl('status') - for line in out.splitlines(): - # One status line may look like one of these two: - # process not in group: - # echo_date_lonely RUNNING pid 7680, uptime 13:22:18 - # process in group: - # echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18 - fields = [field for field in line.split(' ') if field != ''] - process_name = fields[0] - status = fields[1] - - if is_group: - # If there is ':', this process must be in a group. 
- if ':' in process_name: - group = process_name.split(':')[0] - if group != name: - continue - else: - continue - else: - if process_name != name: - continue - - matched.append((process_name, status)) - return matched - - def take_action_on_processes(processes, status_filter, action, expected_result): - to_take_action_on = [] - for process_name, status in processes: - if status_filter(status): - to_take_action_on.append(process_name) - - if len(to_take_action_on) == 0: - module.exit_json(changed=False, name=name, state=state) - if module.check_mode: - module.exit_json(changed=True) - for process_name in to_take_action_on: - rc, out, err = run_supervisorctl(action, process_name) - if '%s: %s' % (process_name, expected_result) not in out: - module.fail_json(msg=out) - - module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on) - - if state == 'restarted': - rc, out, err = run_supervisorctl('update') - processes = get_matched_processes() - take_action_on_processes(processes, lambda s: True, 'restart', 'started') - - processes = get_matched_processes() - - if state == 'present': - if len(processes) > 0: - module.exit_json(changed=False, name=name, state=state) - - if module.check_mode: - module.exit_json(changed=True) - run_supervisorctl('reread', check_rc=True) - rc, out, err = run_supervisorctl('add', name) - if '%s: added process group' % name in out: - module.exit_json(changed=True, name=name, state=state) - else: - module.fail_json(msg=out, name=name, state=state) - - if state == 'started': - take_action_on_processes(processes, lambda s: s != 'RUNNING', 'start', 'started') - - if state == 'stopped': - take_action_on_processes(processes, lambda s: s == 'RUNNING', 'stop', 'stopped') - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/windows/setup.ps1 b/library/windows/setup.ps1 deleted file mode 100644 index c249251d974..00000000000 --- a/library/windows/setup.ps1 +++ /dev/null @@ -1,100 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
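Stepping back to the supervisorctl module above: it discovers processes by scraping supervisorctl status output, where a process inside a group is printed as group:name. A standalone sketch of that matching logic, run against the two sample status lines from the module's own comment:

    def matched_processes(status_output, name, is_group):
        # Each status line looks like: '<name>  <STATE>  pid NNNN, uptime HH:MM:SS'
        matched = []
        for line in status_output.splitlines():
            fields = line.split()
            if len(fields) < 2:
                continue
            process_name, state = fields[0], fields[1]
            if is_group:
                # Keep only processes whose group prefix matches.
                if ':' not in process_name or process_name.split(':')[0] != name:
                    continue
            elif process_name != name:
                continue
            matched.append((process_name, state))
        return matched

    sample = ("echo_date_lonely              RUNNING   pid 7680, uptime 13:22:18\n"
              "echo_date_group:echo_date_00  RUNNING   pid 7681, uptime 13:22:18")
    print(matched_processes(sample, 'echo_date_group', True))
    # -> [('echo_date_group:echo_date_00', 'RUNNING')]

The (name, state) pairs feed take_action_on_processes, which filters on state so that start and stop stay idempotent.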
- -# WANT_JSON -# POWERSHELL_COMMON - -# $params is not currently used in this module -# $params = Parse-Args $args; - -$result = New-Object psobject @{ - ansible_facts = New-Object psobject - changed = $false -}; - -$osversion = [Environment]::OSVersion -$memory = @() -$memory += Get-WmiObject win32_Physicalmemory -$capacity = 0 -$memory | foreach {$capacity += $_.Capacity} -$netcfg = Get-WmiObject win32_NetworkAdapterConfiguration - -$ActiveNetcfg = @(); $ActiveNetcfg+= $netcfg | where {$_.ipaddress -ne $null} -$formattednetcfg = @() -foreach ($adapter in $ActiveNetcfg) -{ - $thisadapter = New-Object psobject @{ - interface_name = $adapter.description - dns_domain = $adapter.dnsdomain - default_gateway = $null - interface_index = $adapter.InterfaceIndex - } - - if ($adapter.defaultIPGateway) - { - $thisadapter.default_gateway = $adapter.DefaultIPGateway[0].ToString() - } - - $formattednetcfg += $thisadapter;$thisadapter = $null -} - -Set-Attr $result.ansible_facts "ansible_interfaces" $formattednetcfg - -Set-Attr $result.ansible_facts "ansible_hostname" $env:COMPUTERNAME; -Set-Attr $result.ansible_facts "ansible_fqdn" "$([System.Net.Dns]::GetHostByName((hostname)).HostName)" -Set-Attr $result.ansible_facts "ansible_system" $osversion.Platform.ToString() -Set-Attr $result.ansible_facts "ansible_os_family" "Windows" -Set-Attr $result.ansible_facts "ansible_distribution" $osversion.VersionString -Set-Attr $result.ansible_facts "ansible_distribution_version" $osversion.Version.ToString() - -Set-Attr $result.ansible_facts "ansible_totalmem" $capacity - -$ips = @() -Foreach ($ip in $netcfg.IPAddress) { If ($ip) { $ips += $ip } } -Set-Attr $result.ansible_facts "ansible_ip_addresses" $ips - -$psversion = $PSVersionTable.PSVersion.Major -Set-Attr $result.ansible_facts "ansible_powershell_version" $psversion - -$winrm_https_listener_parent_path = Get-ChildItem -Path WSMan:\localhost\Listener -Recurse | Where-Object {$_.PSChildName -eq "Transport" -and $_.Value -eq "HTTPS"} | select PSParentPath - -if ($winrm_https_listener_parent_path ) { - $winrm_https_listener_path = $winrm_https_listener_parent_path.PSParentPath.Substring($winrm_https_listener_parent_path.PSParentPath.LastIndexOf("\")) -} - -if ($winrm_https_listener_path) -{ - $https_listener = Get-ChildItem -Path "WSMan:\localhost\Listener$winrm_https_listener_path" -} - -if ($https_listener) -{ - $winrm_cert_thumbprint = $https_listener | where {$_.Name -EQ "CertificateThumbprint" } | select Value -} - -if ($winrm_cert_thumbprint) -{ - $uppercase_cert_thumbprint = $winrm_cert_thumbprint.Value.ToString().ToUpper() -} - -$winrm_cert_expiry = Get-ChildItem -Path Cert:\LocalMachine\My | where Thumbprint -EQ $uppercase_cert_thumbprint | select NotAfter - -if ($winrm_cert_expiry) -{ - Set-Attr $result.ansible_facts "ansible_winrm_certificate_expires" $winrm_cert_expiry.NotAfter.ToString("yyyy-MM-dd HH:mm:ss") -} - -Exit-Json $result; diff --git a/library/windows/slurp.ps1 b/library/windows/slurp.ps1 deleted file mode 100644 index edf1da7635f..00000000000 --- a/library/windows/slurp.ps1 +++ /dev/null @@ -1,46 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$src = Get-Attr $params "src" (Get-Attr $params "path" $FALSE); -If (-not $src) -{ - Fail-Json (New-Object psobject) "missing required argument: src"; -} - -If (Test-Path -PathType Leaf $src) -{ - $bytes = [System.IO.File]::ReadAllBytes($src); - $content = [System.Convert]::ToBase64String($bytes); - $result = New-Object psobject @{ - changed = $false - encoding = "base64" - content = $content - }; - Exit-Json $result; -} -ElseIf (Test-Path -PathType Container $src) -{ - Fail-Json (New-Object psobject) ("is a directory: " + $src); -} -Else -{ - Fail-Json (New-Object psobject) ("file not found: " + $src); -} diff --git a/library/windows/win_feature b/library/windows/win_feature deleted file mode 100644 index ef344ee3b22..00000000000 --- a/library/windows/win_feature +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Paul Durivage , Trond Hindenes and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# this is a windows documentation stub. actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_feature -version_added: "1.7" -short_description: Installs and uninstalls Windows Features -description: - - Installs or uninstalls Windows Roles or Features -options: - name: - description: - - Names of roles or features to install as a single feature or a comma-separated list of features - required: true - default: null - aliases: [] - state: - description: - - State of the features or roles on the system - required: false - choices: - - present - - absent - default: present - aliases: [] - restart: - description: - - Restarts the computer automatically when installation is complete, if restarting is required by the roles or features installed. - choices: - - yes - - no - default: null - aliases: [] - include_sub_features: - description: - - Adds all subfeatures of the specified feature - choices: - - yes - - no - default: null - aliases: [] - include_management_tools: - description: - - Adds the corresponding management tools to the specified feature - choices: - - yes - - no - default: null - aliases: [] -author: Paul Durivage / Trond Hindenes -''' - -EXAMPLES = ''' -# This installs IIS. 
-# The names of features available for install can be run by running the following Powershell Command: -# PS C:\Users\Administrator> Import-Module ServerManager; Get-WindowsFeature -$ ansible -i hosts -m win_feature -a "name=Web-Server" all -$ ansible -i hosts -m win_feature -a "name=Web-Server,Web-Common-Http" all - - -# Playbook example ---- -- name: Install IIS - hosts: all - gather_facts: false - tasks: - - name: Install IIS - win_feature: - name: "Web-Server" - state: absent - restart: yes - include_sub_features: yes - include_management_tools: yes - - -''' diff --git a/library/windows/win_feature.ps1 b/library/windows/win_feature.ps1 deleted file mode 100644 index a0776a4bf1a..00000000000 --- a/library/windows/win_feature.ps1 +++ /dev/null @@ -1,122 +0,0 @@ -#!powershell -# This file is part of Ansible. -# -# Copyright 2014, Paul Durivage -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# WANT_JSON -# POWERSHELL_COMMON - -Import-Module Servermanager; - -$params = Parse-Args $args; - -$result = New-Object psobject @{ - changed = $false -} - -If ($params.name) { - $name = $params.name -} -Else { - Fail-Json $result "mising required argument: name" -} - -If ($params.state) { - $state = $params.state.ToString().ToLower() - If (($state -ne 'present') -and ($state -ne 'absent')) { - Fail-Json $result "state is '$state'; must be 'present' or 'absent'" - } -} -Elseif (!$params.state) { - $state = "present" -} - -If ($params.restart) { - $restart = $params.restart | ConvertTo-Bool -} -Else -{ - $restart = $false -} - -if ($params.include_sub_features) -{ - $includesubfeatures = $params.include_sub_features | ConvertTo-Bool -} -Else -{ - $includesubfeatures = $false -} - -if ($params.include_management_tools) -{ - $includemanagementtools = $params.include_management_tools | ConvertTo-Bool -} -Else -{ - $includemanagementtools = $false -} - - - -If ($state -eq "present") { - try { - $featureresult = Add-WindowsFeature -Name $name -Restart:$restart -IncludeAllSubFeature:$includesubfeatures -IncludeManagementTools:$includemanagementtools - } - catch { - Fail-Json $result $_.Exception.Message - } -} -Elseif ($state -eq "absent") { - try { - $featureresult = Remove-WindowsFeature -Name $name -Restart:$restart - } - catch { - Fail-Json $result $_.Exception.Message - } -} - -# Loop through results and create a hash containing details about -# each role/feature that is installed/removed -$installed_features = @() -#$featureresult.featureresult is filled if anything was changed -if ($featureresult.FeatureResult) -{ - ForEach ($item in $featureresult.FeatureResult) { - $installed_features += New-Object psobject @{ - id = $item.id.ToString() - display_name = $item.DisplayName - message = $item.Message.ToString() - restart_needed = $item.RestartNeeded.ToString() - skip_reason = $item.SkipReason.ToString() - success = $item.Success.ToString() - } - } - Set-Attr $result "feature_result" $installed_features - - - $result.changed = $true -} -Else -{ - Set-Attr 
$result "feature_result" $null -} - -Set-Attr $result "feature_success" $featureresult.Success.ToString() -Set-Attr $result "feature_exitcode" $featureresult.ExitCode.ToString() -Set-Attr $result "feature_restart_needed" $featureresult.RestartNeeded.ToString() - -Exit-Json $result; diff --git a/library/windows/win_get_url b/library/windows/win_get_url deleted file mode 100644 index 10910cf605e..00000000000 --- a/library/windows/win_get_url +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Paul Durivage , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# this is a windows documentation stub. actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_get_url -version_added: "1.7" -short_description: Fetches a file from a given URL -description: - - Fetches a file from a URL and saves to locally -options: - url: - description: - - The full URL of a file to download - required: true - default: null - aliases: [] - dest: - description: - - The absolute path of the location to save the file at the URL. Be sure to include a filename and extension as appropriate. - required: false - default: yes - aliases: [] -author: Paul Durivage -''' - -EXAMPLES = ''' -# Downloading a JPEG and saving it to a file with the ansible command. -# Note the "dest" is quoted rather instead of escaping the backslashes -$ ansible -i hosts -c winrm -m win_get_url -a "url=http://www.example.com/earthrise.jpg dest='C:\Users\Administrator\earthrise.jpg'" all - -# Playbook example -- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' - win_get_url: - url: 'http://www.example.com/earthrise.jpg' - dest: 'C:\Users\RandomUser\earthrise.jpg' -''' diff --git a/library/windows/win_get_url.ps1 b/library/windows/win_get_url.ps1 deleted file mode 100644 index b555cc7a52c..00000000000 --- a/library/windows/win_get_url.ps1 +++ /dev/null @@ -1,56 +0,0 @@ -#!powershell -# This file is part of Ansible. -# -# Copyright 2014, Paul Durivage -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$result = New-Object psobject @{ - win_get_url = New-Object psobject - changed = $false -} - -If ($params.url) { - $url = $params.url -} -Else { - Fail-Json $result "mising required argument: url" -} - -If ($params.dest) { - $dest = $params.dest -} -Else { - Fail-Json $result "missing required argument: dest" -} - -$client = New-Object System.Net.WebClient - -Try { - $client.DownloadFile($url, $dest) - $result.changed = $true -} -Catch { - Fail-Json $result "Error downloading $url to $dest" -} - -Set-Attr $result.win_get_url "url" $url -Set-Attr $result.win_get_url "dest" $dest - -Exit-Json $result; diff --git a/library/windows/win_group b/library/windows/win_group deleted file mode 100644 index 2013b52be53..00000000000 --- a/library/windows/win_group +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Chris Hoffman -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# this is a windows documentation stub. actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_group -version_added: "1.7" -short_description: Add and remove local groups -description: - - Add and remove local groups -options: - name: - description: - - Name of the group - required: true - default: null - aliases: [] - description: - description: - - Description of the group - required: false - default: null - aliases: [] - state: - description: - - Create or remove the group - required: false - choices: - - present - - absent - default: present - aliases: [] -author: Chris Hoffman -''' - -EXAMPLES = ''' - # Create a new group - win_group: - name: deploy - description: Deploy Group - state: present - - # Remove a group - win_group: - name: deploy - state: absent -''' diff --git a/library/windows/win_group.ps1 b/library/windows/win_group.ps1 deleted file mode 100644 index febaf47d014..00000000000 --- a/library/windows/win_group.ps1 +++ /dev/null @@ -1,70 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Copyright 2014, Chris Hoffman -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$result = New-Object PSObject; -Set-Attr $result "changed" $false; - -If (-not $params.name.GetType) { - Fail-Json $result "missing required arguments: name" -} - -If ($params.state) { - $state = $params.state.ToString().ToLower() - If (($state -ne "present") -and ($state -ne "absent")) { - Fail-Json $result "state is '$state'; must be 'present' or 'absent'" - } -} -Elseif (-not $params.state) { - $state = "present" -} - -$adsi = [ADSI]"WinNT://$env:COMPUTERNAME" -$group = $adsi.Children | Where-Object {$_.SchemaClassName -eq 'group' -and $_.Name -eq $params.name } - -try { - If ($state -eq "present") { - If (-not $group) { - $group = $adsi.Create("Group", $params.name) - $group.SetInfo() - - Set-Attr $result "changed" $true - } - - If ($params.description.GetType) { - IF (-not $group.description -or $group.description -ne $params.description) { - $group.description = $params.description - $group.SetInfo() - Set-Attr $result "changed" $true - } - } - } - ElseIf ($state -eq "absent" -and $group) { - $adsi.delete("Group", $group.Name.Value) - Set-Attr $result "changed" $true - } -} -catch { - Fail-Json $result $_.Exception.Message -} - -Exit-Json $result diff --git a/library/windows/win_msi b/library/windows/win_msi deleted file mode 100644 index 9eb6f1bafa5..00000000000 --- a/library/windows/win_msi +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Matt Martz , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# this is a windows documentation stub. 
actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_msi -version_added: "1.7" -short_description: Installs and uninstalls Windows MSI files -description: - - Installs or uninstalls a Windows MSI file that is already located on the - target server -options: - path: - description: - - File system path to the MSI file to install - required: true - state: - description: - - Whether the MSI file should be installed or uninstalled - choices: - - present - - absent - default: present - creates: - description: - - Path to a file created by installing the MSI to prevent from - attempting to reinstall the package on every run -author: Matt Martz -''' - -EXAMPLES = ''' -# Install an MSI file -- win_msi: path=C:\\\\7z920-x64.msi - -# Uninstall an MSI file -- win_msi: path=C:\\\\7z920-x64.msi state=absent -''' - diff --git a/library/windows/win_msi.ps1 b/library/windows/win_msi.ps1 deleted file mode 100644 index 1c2bc8a3019..00000000000 --- a/library/windows/win_msi.ps1 +++ /dev/null @@ -1,63 +0,0 @@ -#!powershell -# (c) 2014, Matt Martz , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$result = New-Object psobject; -Set-Attr $result "changed" $false; - -If (-not $params.path.GetType) -{ - Fail-Json $result "missing required arguments: path" -} - -$extra_args = "" -If ($params.extra_args.GetType) -{ - $extra_args = $params.extra_args; -} - -If ($params.creates.GetType -and $params.state.GetType -and $params.state -ne "absent") -{ - If (Test-File $creates) - { - Exit-Json $result; - } -} - -$logfile = [IO.Path]::GetTempFileName(); -if ($params.state.GetType -and $params.state -eq "absent") -{ - msiexec.exe /x $params.path /qb /l $logfile $extra_args; -} -Else -{ - msiexec.exe /i $params.path /qb /l $logfile $extra_args; -} - -Set-Attr $result "changed" $true; - -$logcontents = Get-Content $logfile; -Remove-Item $logfile; - -Set-Attr $result "log" $logcontents; - -Exit-Json $result; diff --git a/library/windows/win_ping b/library/windows/win_ping deleted file mode 100644 index de32877d615..00000000000 --- a/library/windows/win_ping +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# this is a windows documentation stub. actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_ping -version_added: "1.7" -short_description: A windows version of the classic ping module. -description: - - Checks management connectivity of a windows host -options: - data: - description: - - Alternate data to return instead of 'pong' - required: false - default: 'pong' - aliases: [] -author: Chris Church -''' - -EXAMPLES = ''' -# Test connectivity to a windows host -ansible winserver -m win_ping - -# Example from an Ansible Playbook -- action: win_ping -''' - diff --git a/library/windows/win_service b/library/windows/win_service deleted file mode 100644 index c378be120b1..00000000000 --- a/library/windows/win_service +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Chris Hoffman -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# this is a windows documentation stub. actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_service -version_added: "1.7" -short_description: Manages Windows services -description: - - Manages Windows services -options: - name: - description: - - Name of the service - required: true - default: null - aliases: [] - start_mode: - description: - - Set the startup type for the service - required: false - choices: - - auto - - manual - - disabled - state: - description: - - C(started)/C(stopped) are idempotent actions that will not run - commands unless necessary. C(restarted) will always bounce the - service. - required: false - choices: - - started - - stopped - - restarted - default: null - aliases: [] -author: Chris Hoffman -''' - -EXAMPLES = ''' - # Restart a service - win_service: - name: spooler - state: restarted - - # Set service startup mode to auto and ensure it is started - win_service: - name: spooler - start_mode: auto - state: started -''' diff --git a/library/windows/win_service.ps1 b/library/windows/win_service.ps1 deleted file mode 100644 index a70d82a4ef3..00000000000 --- a/library/windows/win_service.ps1 +++ /dev/null @@ -1,106 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Copyright 2014, Chris Hoffman -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$result = New-Object PSObject; -Set-Attr $result "changed" $false; - -If (-not $params.name.GetType) -{ - Fail-Json $result "missing required arguments: name" -} - -If ($params.state) { - $state = $params.state.ToString().ToLower() - If (($state -ne 'started') -and ($state -ne 'stopped') -and ($state -ne 'restarted')) { - Fail-Json $result "state is '$state'; must be 'started', 'stopped', or 'restarted'" - } -} - -If ($params.start_mode) { - $startMode = $params.start_mode.ToString().ToLower() - If (($startMode -ne 'auto') -and ($startMode -ne 'manual') -and ($startMode -ne 'disabled')) { - Fail-Json $result "start mode is '$startMode'; must be 'auto', 'manual', or 'disabled'" - } -} - -$svcName = $params.name -$svc = Get-Service -Name $svcName -ErrorAction SilentlyContinue -If (-not $svc) { - Fail-Json $result "Service '$svcName' not installed" -} -# Use service name instead of display name for remaining actions. -If ($svcName -ne $svc.ServiceName) { - $svcName = $svc.ServiceName -} - -Set-Attr $result "name" $svc.ServiceName -Set-Attr $result "display_name" $svc.DisplayName - -$svcMode = Get-WmiObject -Class Win32_Service -Property StartMode -Filter "Name='$svcName'" -If ($startMode) { - If ($svcMode.StartMode.ToLower() -ne $startMode) { - Set-Service -Name $svcName -StartupType $startMode - Set-Attr $result "changed" $true - Set-Attr $result "start_mode" $startMode - } - Else { - Set-Attr $result "start_mode" $svcMode.StartMode.ToLower() - } -} -Else { - Set-Attr $result "start_mode" $svcMode.StartMode.ToLower() -} - -If ($state) { - If ($state -eq "started" -and $svc.Status -ne "Running") { - try { - Start-Service -Name $svcName -ErrorAction Stop - } - catch { - Fail-Json $result $_.Exception.Message - } - Set-Attr $result "changed" $true; - } - ElseIf ($state -eq "stopped" -and $svc.Status -ne "Stopped") { - try { - Stop-Service -Name $svcName -ErrorAction Stop - } - catch { - Fail-Json $result $_.Exception.Message - } - Set-Attr $result "changed" $true; - } - ElseIf ($state -eq "restarted") { - try { - Restart-Service -Name $svcName -ErrorAction Stop - } - catch { - Fail-Json $result $_.Exception.Message - } - Set-Attr $result "changed" $true; - } -} -$svc.Refresh() -Set-Attr $result "state" $svc.Status.ToString().ToLower() - -Exit-Json $result; diff --git a/library/windows/win_stat b/library/windows/win_stat deleted file mode 100644 index c98cd55f599..00000000000 --- a/library/windows/win_stat +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# this is a windows documentation stub, actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_stat -version_added: "1.7" -short_description: returns information about a Windows file -description: - - Returns information about a Windows file -options: - path: - description: - - The full path of the file/object to get the facts of; both forward and - back slashes are accepted. - required: true - default: null - aliases: [] - get_md5: - description: - - Whether to return the md5 sum of the file - required: false - default: yes - aliases: [] -author: Chris Church -''' - -EXAMPLES = ''' -# Obtain information about a file - -- win_stat: path=C:\\foo.ini - register: file_info - -- debug: var=file_info -''' - diff --git a/library/windows/win_stat.ps1 b/library/windows/win_stat.ps1 deleted file mode 100644 index 4e4c55b2aa3..00000000000 --- a/library/windows/win_stat.ps1 +++ /dev/null @@ -1,63 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$path = Get-Attr $params "path" $FALSE; -If ($path -eq $FALSE) -{ - Fail-Json (New-Object psobject) "missing required argument: path"; -} - -$get_md5 = Get-Attr $params "get_md5" $TRUE | ConvertTo-Bool; - -$result = New-Object psobject @{ - stat = New-Object psobject - changed = $false -}; - -If (Test-Path $path) -{ - Set-Attr $result.stat "exists" $TRUE; - $info = Get-Item $path; - If ($info.Directory) # Only files have the .Directory attribute. - { - Set-Attr $result.stat "isdir" $FALSE; - Set-Attr $result.stat "size" $info.Length; - } - Else - { - Set-Attr $result.stat "isdir" $TRUE; - } -} -Else -{ - Set-Attr $result.stat "exists" $FALSE; -} - -If ($get_md5 -and $result.stat.exists -and -not $result.stat.isdir) -{ - $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; - $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); - $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); - $fp.Dispose(); - Set-Attr $result.stat "md5" $hash; -} - -Exit-Json $result; diff --git a/library/windows/win_user b/library/windows/win_user deleted file mode 100644 index e2da6a1ddb8..00000000000 --- a/library/windows/win_user +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Matt Martz , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# this is a windows documentation stub. actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_user -version_added: "1.7" -short_description: Manages local Windows user accounts -description: - - Manages local Windows user accounts -options: - name: - description: - - Username of the user to manage - required: true - default: null - aliases: [] - password: - description: - - Password for the user (plain text) - required: true - default: null - aliases: [] - state: - description: - - Whether to create or delete a user - required: false - choices: - - present - - absent - default: present - aliases: [] -author: Paul Durivage -''' - -EXAMPLES = ''' -# Ad-hoc example -$ ansible -i hosts -m win_user -a "name=bob password=Password12345" all -$ ansible -i hosts -m win_user -a "name=bob password=Password12345 state=absent" all - -# Playbook example ---- -- name: Add a user - hosts: all - gather_facts: false - tasks: - - name: Add User - win_user: - name: ansible - password: "@ns1bl3" -''' diff --git a/library/windows/win_user.ps1 b/library/windows/win_user.ps1 deleted file mode 100644 index 306d7a0db2f..00000000000 --- a/library/windows/win_user.ps1 +++ /dev/null @@ -1,116 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Copyright 2014, Paul Durivage -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# WANT_JSON -# POWERSHELL_COMMON - -######## -$adsi = [ADSI]"WinNT://$env:COMPUTERNAME" - -function Get-User($user) { - $adsi.Children | where {$_.SchemaClassName -eq 'user' -and $_.Name -eq $user } - return -} - -function Create-User([string]$user, [string]$passwd) { - $adsiuser = $adsi.Create("User", $user) - $adsiuser.SetPassword($passwd) - $adsiuser.SetInfo() - $adsiuser - return -} - -function Update-Password($user, [string]$passwd) { - $user.SetPassword($passwd) - $user.SetInfo() -} - -function Delete-User($user) { - $adsi.delete("user", $user.Name.Value) -} -######## - -$params = Parse-Args $args; - -$result = New-Object psobject @{ - changed = $false -}; - -If (-not $params.name.GetType) -{ - Fail-Json $result "missing required arguments: name" -} - -If ($params.state) { - $state = $params.state.ToString().ToLower() - If (($state -ne 'present') -and ($state -ne 'absent')) { - Fail-Json $result "state is '$state'; must be 'present' or 'absent'" - } -} -Elseif (!$params.state) { - $state = "present" -} - -If ((-not $params.password.GetType) -and ($state -eq 'present')) -{ - Fail-Json $result "missing required arguments: password" -} - -$username = Get-Attr $params "name" -$password = Get-Attr $params "password" - -$user_obj = Get-User $username - -if ($state -eq 'present') { - # Add or update user - try { - if ($user_obj.GetType) { - Update-Password $user_obj $password - } - else { - Create-User $username $password - } - $result.changed = $true - $user_obj = Get-User $username - } - catch { - Fail-Json $result $_.Exception.Message - } -} -else { - # Remove user - try { - if ($user_obj.GetType) { - Delete-User $user_obj - $result.changed = $true - } - else { - Set-Attr $result "msg" "User '$username' was not found" - } - } - catch { - Fail-Json $result $_.Exception.Message - } -} - -# Set-Attr $result "user" $user_obj -Set-Attr $result "user_name" $user_obj.Name -Set-Attr $result "user_fullname" $user_obj.FullName -Set-Attr $result "user_path" $user_obj.Path - -Exit-Json $result; diff --git a/packaging/arch/PKGBUILD b/packaging/arch/PKGBUILD index f4db6fbd7be..f2f94229065 100644 --- a/packaging/arch/PKGBUILD +++ b/packaging/arch/PKGBUILD @@ -1,4 +1,5 @@ # Maintainer: Michel Blanc +# Contributor: Scott Hansen https://github.com/firecat53 # Contributor: Buce # Contributor: Bartłomiej Piotrowski # Contributor: cgtx @@ -9,7 +10,7 @@ # Contributor: Michael DeHaan pkgname=ansible-git -pkgver=1.1.4095.g3f2f5fe +pkgver=1.6.0.1835.ga1809a3 pkgrel=1 pkgdesc='Radically simple IT automation platform' arch=('any') @@ -33,6 +34,7 @@ pkgver() { build() { cd $pkgname + git submodule update --init --recursive make PYTHON=python2 } @@ -40,7 +42,6 @@ package() { cd $pkgname install -dm755 $pkgdir/usr/share/ansible - cp -dpr --no-preserve=ownership ./library/* "$pkgdir/usr/share/ansible/" cp -dpr --no-preserve=ownership ./examples "$pkgdir/usr/share/ansible" python2 setup.py install -O1 --root="$pkgdir" diff --git a/packaging/debian/ansible.install b/packaging/debian/ansible.install index 30aef22b87c..2caf1452fe3 100644 --- a/packaging/debian/ansible.install +++ b/packaging/debian/ansible.install @@ -1,5 +1,4 @@ examples/hosts etc/ansible -library/* usr/share/ansible docs/man/man1/*.1 usr/share/man/man1 bin/* usr/bin examples/ansible.cfg etc/ansible diff --git a/packaging/debian/changelog b/packaging/debian/changelog index d8238612d65..5b7cb7c2f7b 100644 --- a/packaging/debian/changelog +++ b/packaging/debian/changelog @@ -1,9 +1,27 @@ -ansible (1.8) unstable; urgency=low +ansible (1.9) unstable; 
urgency=low
 
- * 1.8 release (PENDING)
+ * 1.9 release (PENDING)
 
  -- Michael DeHaan  Wed, 21 Oct 2015 04:29:00 -0500
 
+ansible (1.8) unstable; urgency=low
+
+ * 1.8 release
+
+ -- Michael DeHaan  Tue, 25 Nov 2014 17:00:00 -0500
+
+ansible (1.7.2) unstable; urgency=low
+
+ * 1.7.2 release
+
+ -- Michael DeHaan  Wed, 24 Sep 2014 15:00:00 -0500
+
+ansible (1.7.1) unstable; urgency=low
+
+ * 1.7.1 release
+
+ -- Michael DeHaan  Thu, 14 Aug 2014 17:00:00 -0500
+
 ansible (1.7) unstable; urgency=low
 
  * 1.7.0 release
diff --git a/packaging/gentoo/README.md b/packaging/gentoo/README.md
index 7420860642d..991692c9c79 100644
--- a/packaging/gentoo/README.md
+++ b/packaging/gentoo/README.md
@@ -1,3 +1,3 @@
-Gentoo ebuilds are available here:
+Gentoo ebuilds are available in the main tree:
 
-https://github.com/uu/ubuilds
+emerge ansible
diff --git a/packaging/rpm/ansible.spec b/packaging/rpm/ansible.spec
index 3d1b82bb049..71061b601b8 100644
--- a/packaging/rpm/ansible.spec
+++ b/packaging/rpm/ansible.spec
@@ -95,7 +95,6 @@ cp examples/ansible.cfg %{buildroot}/etc/ansible/
 mkdir -p %{buildroot}/%{_mandir}/man1/
 cp -v docs/man/man1/*.1 %{buildroot}/%{_mandir}/man1/
 mkdir -p %{buildroot}/%{_datadir}/ansible
-cp -rv library/* %{buildroot}/%{_datadir}/ansible/
 
 %clean
 rm -rf %{buildroot}
@@ -105,16 +104,21 @@ rm -rf %{buildroot}
 %{python_sitelib}/ansible*
 %{_bindir}/ansible*
 %dir %{_datadir}/ansible
-%dir %{_datadir}/ansible/*
-%{_datadir}/ansible/*/*
 %config(noreplace) %{_sysconfdir}/ansible
 %doc README.md PKG-INFO COPYING
 %doc %{_mandir}/man1/ansible*
-%doc examples/playbooks
-
 %changelog
+* Tue Nov 25 2014 Michael DeHaan - 1.8.0
+- Release 1.8.0
+
+* Wed Sep 24 2014 Michael DeHaan - 1.7.2
+- Release 1.7.2
+
+* Thu Aug 14 2014 Michael DeHaan - 1.7.1
+- Release 1.7.1
+
 * Wed Aug 06 2014 Michael DeHaan - 1.7.0
 - Release 1.7.0
diff --git a/plugins/callbacks/syslog_json.py b/plugins/callbacks/syslog_json.py
new file mode 100644
index 00000000000..5ab764acfe7
--- /dev/null
+++ b/plugins/callbacks/syslog_json.py
@@ -0,0 +1,89 @@
+import os
+import json
+
+import logging
+import logging.handlers
+
+
+class CallbackModule(object):
+    """
+    logs ansible-playbook and ansible runs to a syslog server in json format
+    make sure you have in ansible.cfg:
+        callback_plugins = <path_to_callback_plugins_folder>
+    and put the plugin in <path_to_callback_plugins_folder>
+
+    This plugin makes use of the following environment variables:
+        SYSLOG_SERVER   (optional): defaults to localhost
+        SYSLOG_PORT     (optional): defaults to 514
+    """
+
+    def __init__(self):
+        self.logger = logging.getLogger('ansible logger')
+        self.logger.setLevel(logging.DEBUG)
+
+        self.handler = logging.handlers.SysLogHandler(
+            address = (os.getenv('SYSLOG_SERVER', 'localhost'),
+                       os.getenv('SYSLOG_PORT', 514)),
+            facility=logging.handlers.SysLogHandler.LOG_USER
+        )
+        self.logger.addHandler(self.handler)
+
+    def on_any(self, *args, **kwargs):
+        pass
+
+    def runner_on_failed(self, host, res, ignore_errors=False):
+        self.logger.info('RUNNER_ON_FAILED ' + host + ' ' + json.dumps(res, sort_keys=True))
+
+    def runner_on_ok(self, host, res):
+        self.logger.info('RUNNER_ON_OK ' + host + ' ' + json.dumps(res, sort_keys=True))
+
+    def runner_on_skipped(self, host, item=None):
+        self.logger.info('RUNNER_ON_SKIPPED ' + host + ' ...')
+
+    def runner_on_unreachable(self, host, res):
+        self.logger.info('RUNNER_UNREACHABLE ' + host + ' ' + json.dumps(res, sort_keys=True))
+
+    def runner_on_no_hosts(self):
+        pass
+
+    def runner_on_async_poll(self, host, res):
+        pass
+
+    def runner_on_async_ok(self, host, res):
+        pass
+
+    def runner_on_async_failed(self, host, res):
+        self.logger.info('RUNNER_ASYNC_FAILED ' + host + ' ' + json.dumps(res, sort_keys=True))
+
+    def playbook_on_start(self):
+        pass
+
+    def playbook_on_notify(self, host, handler):
+        pass
+
+    def playbook_on_no_hosts_matched(self):
+        pass
+
+    def playbook_on_no_hosts_remaining(self):
+        pass
+
+    def playbook_on_task_start(self, name, is_conditional):
+        pass
+
+    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+        pass
+
+    def playbook_on_setup(self):
+        pass
+
+    def playbook_on_import_for_host(self, host, imported_file):
+        self.logger.info('PLAYBOOK_ON_IMPORTED ' + host + ' ' + imported_file)
+
+    def playbook_on_not_import_for_host(self, host, missing_file):
+        self.logger.info('PLAYBOOK_ON_NOTIMPORTED ' + host + ' ' + missing_file)
+
+    def playbook_on_play_start(self, name):
+        pass
+
+    def playbook_on_stats(self, stats):
+        pass
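For a quick wiring check of the handler setup above, outside of Ansible, the same configuration can be reproduced directly. A sketch only: SysLogHandler defaults to UDP, so this emits a datagram whether or not a syslog daemon is listening on localhost:514.

    import json, logging, logging.handlers, os
    logger = logging.getLogger('ansible logger')
    logger.setLevel(logging.DEBUG)
    # mirror the plugin's env-var handling; int() because env values are strings
    logger.addHandler(logging.handlers.SysLogHandler(
        address=(os.getenv('SYSLOG_SERVER', 'localhost'), int(os.getenv('SYSLOG_PORT', 514))),
        facility=logging.handlers.SysLogHandler.LOG_USER))
    logger.info('RUNNER_ON_OK testhost ' + json.dumps({'changed': False}, sort_keys=True))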
diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py
index f607da2dc28..f352c8cf9d2 100755
--- a/plugins/inventory/cobbler.py
+++ b/plugins/inventory/cobbler.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 
 """
 Cobbler external inventory script
diff --git a/plugins/inventory/collins.py b/plugins/inventory/collins.py
index 1561fcf5c72..64e16f57069 100755
--- a/plugins/inventory/collins.py
+++ b/plugins/inventory/collins.py
@@ -41,7 +41,9 @@ that will be used instead of the configured values if they are set:
 If errors are encountered during operation, this script will return an exit code of 255;
 otherwise, it will return an exit code of 0.
 
-Tested against Ansible 1.6.6 and Collins 1.2.4.
+Collins attributes are accessible as variables in ansible via COLLINS['attribute_name'].
+
+Tested against Ansible 1.8.2 and Collins 1.3.0.
 """
 
 # (c) 2014, Steve Salevan
@@ -305,6 +307,8 @@ class CollinsInventory(object):
         else:
             ip_index = self.ip_address_index
 
+        asset['COLLINS'] = {}
+
         # Attempts to locate the asset's primary identifier (hostname or IP address),
         # which will be used to index the asset throughout the Ansible inventory.
         if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
@@ -332,8 +336,8 @@ class CollinsInventory(object):
         if 'ATTRIBS' in asset:
             for attrib_block in asset['ATTRIBS'].keys():
                 for attrib in asset['ATTRIBS'][attrib_block].keys():
-                    attrib_key = self.to_safe(
-                        '%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib]))
+                    asset['COLLINS'][attrib] = asset['ATTRIBS'][attrib_block][attrib]
+                    attrib_key = self.to_safe('%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib]))
                     self.push(self.inventory, attrib_key, asset_identifier)
 
         # Indexes asset by all built-in Collins attributes.
diff --git a/plugins/inventory/consul.ini b/plugins/inventory/consul.ini
new file mode 100644
index 00000000000..8761494ab97
--- /dev/null
+++ b/plugins/inventory/consul.ini
@@ -0,0 +1,37 @@
+# Ansible Consul external inventory script settings.
+
+[consul]
+
+# restrict included nodes to those from this datacenter
+#datacenter = nyc1
+
+# url of the consul cluster to query
+#url = http://demo.consul.io
+url = http://localhost:8500
+
+# suffix added to each service to create a group name e.g. a service of 'redis' and
+# a suffix of '_servers' will add each address to the group name 'redis_servers'
servers_suffix = _servers
+
+# if specified then the inventory will generate domain names that will resolve
+# via Consul's inbuilt DNS.
+#domain=consul
+
+# make groups from service tags. the name of the group is derived from the
+# service name and the tag name e.g. a service named nginx with tags ['master', 'v1']
+# will create groups nginx_master and nginx_v1
+tags = true
+
+# looks up the node name at the given path for a list of groups to which the
+# node should be added.
+kv_groups=ansible/groups
+
+# looks up the node name at the given path for a json dictionary of metadata that
+# should be attached as metadata for the node
+kv_metadata=ansible/metadata
+
+# looks up the health of each service and adds the node to 'up' and 'down' groups
+# based on the service availability
+availability = true
+available_suffix = _up
+unavailable_suffix = _down
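Note that the script below builds the kv lookup key as <kv_groups>/<datacenter>/<node>, so seeding the store must include the datacenter segment. A sketch using python-consul (the library the script itself imports); datacenter 'dc1' and node 'nyc-web-1' are illustrative names:

    import json
    import consul
    c = consul.Consul(host='localhost', port=8500)
    # comma-separated group list read via kv_groups
    c.kv.put('ansible/groups/dc1/nyc-web-1', 'test,honeypot')
    # json dictionary of hostvars read via kv_metadata
    c.kv.put('ansible/metadata/dc1/nyc-web-1', json.dumps({'rack': 'a1'}))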
diff --git a/plugins/inventory/consul_io.py b/plugins/inventory/consul_io.py
new file mode 100755
index 00000000000..46d47fd3bf5
--- /dev/null
+++ b/plugins/inventory/consul_io.py
@@ -0,0 +1,427 @@
+#!/usr/bin/env python
+
+#
+# (c) 2015, Steve Gargan
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+######################################################################
+
+'''
+Consul.io inventory script (http://consul.io)
+=============================================
+
+Generates Ansible inventory from nodes in a Consul cluster. This script will
+group nodes by:
+ - datacenter,
+ - registered service
+ - service tags
+ - service status
+ - values from the k/v store
+
+This script can be run with the switches:
+--list as expected, groups all the nodes in all datacenters
+--datacenter, to restrict the nodes to a single datacenter
+--host to restrict the inventory to a single named node (requires datacenter config)
+
+The configuration for this plugin is read from a consul.ini file located in the
+same directory as this inventory script. All config options in the config file
+are optional except the host and port, which must point to a valid agent or
+server running the http api. For more information on enabling the endpoint see:
+
+http://www.consul.io/docs/agent/options.html
+
+Other options include:
+
+'datacenter':
+
+which restricts the included nodes to those from the given datacenter
+
+'domain':
+
+if specified then the inventory will generate domain names that will resolve
+via Consul's inbuilt DNS. The name is derived from the node name, datacenter
+and domain as <node_name>.node.<datacenter>.<domain>. Note that you will need to
+have consul hooked into your DNS server for these to resolve. See the consul
+DNS docs for more info.
+
+'servers_suffix':
+
+defines the suffix to add to the service name when creating the service
+group. e.g. a service name of 'redis' and a suffix of '_servers' will add
+each node's address to the group name 'redis_servers'. No suffix is added
+if this is not set
+
+'tags':
+
+boolean flag defining if service tags should be used to create Inventory
+groups e.g. an nginx service with the tags ['master', 'v1'] will create
+groups nginx_master and nginx_v1 to which the node running the service
+will be added. No tag groups are created if this is missing.
+
+'token':
+
+ACL token to use to authorize access to the key value store. May be required
+to retrieve the kv_groups and kv_metadata based on your consul configuration.
+
+'kv_groups':
+
+This is used to lookup groups for a node in the key value store. It specifies a
+path to which each discovered node's name will be added to create a key to query
+the key/value store. There it expects to find a comma separated list of group
+names to which the node should be added e.g. if the inventory contains
+'nyc-web-1' in datacenter 'dc1' and kv_groups = 'ansible/groups' then the key
+'v1/kv/ansible/groups/dc1/nyc-web-1' will be queried for a group list. If this
+query returned 'test,honeypot' then the node's address would be added to both
+groups.
+
+'kv_metadata':
+
+kv_metadata is used to lookup metadata for each discovered node. Like kv_groups
+above it is used to build a path to lookup in the kv store where it expects to
+find a json dictionary of metadata entries. If found, each key/value pair in the
+dictionary is added to the metadata for the node.
+
+'availability':
+
+if true then availability groups will be created for each service. The node will
+be added to one of the groups based on the health status of the service. The
+group name is derived from the service name and the configurable availability
+suffixes
+
+'available_suffix':
+
+suffix that should be appended to the service availability groups for available
+services e.g. if the suffix is '_up' and the service is nginx, then nodes with
+healthy nginx services will be added to the nginx_up group. Defaults to
+'_available'
+
+'unavailable_suffix':
+
+as above but for unhealthy services, defaults to '_unavailable'
+
+Note that if the inventory discovers an 'ssh' service running on a node it will
+register the port as ansible_ssh_port in the node's metadata and this port will
+be used to access the machine.
+'''
+
+import os
+import re
+import sys
+import argparse
+from time import time
+import ConfigParser
+import urllib, urllib2, base64
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+try:
+    import consul
+except ImportError, e:
+    print """failed=True msg='python-consul required for this module. see
+      http://python-consul.readthedocs.org/en/latest/#installation'"""
+    sys.exit(1)
+
+
+class ConsulInventory(object):
+
+    def __init__(self):
+        ''' Create an inventory based on the catalog of nodes and services
+        registered in a consul cluster'''
+        self.node_metadata = {}
+        self.nodes = {}
+        self.nodes_by_service = {}
+        self.nodes_by_tag = {}
+        self.nodes_by_datacenter = {}
+        self.nodes_by_kv = {}
+        self.nodes_by_availability = {}
+        self.current_dc = None
+
+        config = ConsulConfig()
+        self.config = config
+
+        self.consul_api = config.get_consul_api()
+
+        if config.has_config('datacenter'):
+            if config.has_config('host'):
+                self.load_data_for_node(config.host, config.datacenter)
+            else:
+                self.load_data_for_datacenter(config.datacenter)
+        else:
+            self.load_all_data_consul()
+
+        self.combine_all_results()
+        print json.dumps(self.inventory, sort_keys=True, indent=2)
+
+    def load_all_data_consul(self):
+        ''' cycle through each of the datacenters in the consul catalog and process
+        the nodes in each '''
+        self.datacenters = self.consul_api.catalog.datacenters()
+        for datacenter in self.datacenters:
+            self.current_dc = datacenter
+            self.load_data_for_datacenter(datacenter)
+
+    def load_availability_groups(self, node, datacenter):
+        '''check the health of each service on a node and add the node to either
+        an 'available' or 'unavailable' grouping. The suffix for each group can be
+        controlled from the config'''
+        if self.config.has_config('availability'):
+            for service_name, service in node['Services'].iteritems():
+                for node in self.consul_api.health.service(service_name)[1]:
+                    for check in node['Checks']:
+                        if check['ServiceName'] == service_name:
+                            ok = 'passing' == check['Status']
+                            if ok:
+                                suffix = self.config.get_availability_suffix(
+                                    'available_suffix', '_available')
+                            else:
+                                suffix = self.config.get_availability_suffix(
+                                    'unavailable_suffix', '_unavailable')
+                            self.add_node_to_map(self.nodes_by_availability,
+                                                 service_name + suffix, node['Node'])
+
+    def load_data_for_datacenter(self, datacenter):
+        '''processes all the nodes in a particular datacenter'''
+        index, nodes = self.consul_api.catalog.nodes(dc=datacenter)
+        for node in nodes:
+            self.add_node_to_map(self.nodes_by_datacenter, datacenter, node)
+            self.load_data_for_node(node['Node'], datacenter)
+
+    def load_data_for_node(self, node, datacenter):
+        '''loads the data for a single node, adding it to various groups based on
+        metadata retrieved from the kv store and service availability'''
+
+        index, node_data = self.consul_api.catalog.node(node, datacenter)
+        node = node_data['Node']
+        self.add_node_to_map(self.nodes, 'all', node)
+        self.add_metadata(node_data, "consul_datacenter", datacenter)
+        self.add_metadata(node_data, "consul_nodename", node['Node'])
+
+        self.load_groups_from_kv(node_data)
+        self.load_node_metadata_from_kv(node_data)
+        self.load_availability_groups(node_data, datacenter)
+
+        for name, service in node_data['Services'].items():
+            self.load_data_from_service(name, service, node_data)
+
+    def load_node_metadata_from_kv(self, node_data):
+        ''' load the json dict at the metadata path defined by the kv_metadata value
+        and the node name, adding each entry in the dictionary to the node's
+        metadata '''
+        node = node_data['Node']
+        if self.config.has_config('kv_metadata'):
+            key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node'])
+            index, metadata = self.consul_api.kv.get(key)
+            if metadata and metadata['Value']:
+                try:
+                    metadata = json.loads(metadata['Value'])
+                    for k,v in metadata.items():
+                        self.add_metadata(node_data, k, v)
+                except:
+                    pass
+
+    def load_groups_from_kv(self, node_data):
+        ''' load the comma separated list of groups at the path defined by the
+        kv_groups config value and the node name, adding the node address to each
+        group found '''
+        node = node_data['Node']
+        if self.config.has_config('kv_groups'):
+            key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node'])
+            index, groups = self.consul_api.kv.get(key)
+            if groups and groups['Value']:
+                for group in groups['Value'].split(','):
+                    self.add_node_to_map(self.nodes_by_kv, group.strip(), node)
+
+    def load_data_from_service(self, service_name, service, node_data):
+        '''process a service registered on a node, adding the node to a group with
+        the service name. Each service tag is extracted and the node is added to a
+        tag grouping also'''
+        self.add_metadata(node_data, "consul_services", service_name, True)
+
+        if self.is_service("ssh", service_name):
+            self.add_metadata(node_data, "ansible_ssh_port", service['Port'])
+
+        if self.config.has_config('servers_suffix'):
+            service_name = service_name + self.config.servers_suffix
+
+        self.add_node_to_map(self.nodes_by_service, service_name, node_data['Node'])
+        self.extract_groups_from_tags(service_name, service, node_data)
+
+    def is_service(self, target, name):
+        return name and (name.lower() == target.lower())
+
+    def extract_groups_from_tags(self, service_name, service, node_data):
+        '''iterates each service tag and adds the node to groups derived from the
+        service and tag names e.g. nginx_master'''
+        if self.config.has_config('tags') and service['Tags']:
+            tags = service['Tags']
+            self.add_metadata(node_data, "consul_%s_tags" % service_name, tags)
+            for tag in service['Tags']:
+                tagname = service_name +'_'+tag
+                self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node'])
+
+    def combine_all_results(self):
+        '''prunes and sorts all groupings for combination into the final map'''
+        self.inventory = {"_meta": { "hostvars" : self.node_metadata}}
+        groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service,
+                     self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability]
+        for grouping in groupings:
+            for name, addresses in grouping.items():
+                self.inventory[name] = sorted(list(set(addresses)))
+
+    def add_metadata(self, node_data, key, value, is_list = False):
+        ''' Push an element onto a metadata dict for the node, creating
+        the dict if it doesn't exist '''
+        key = self.to_safe(key)
+        node = self.get_inventory_name(node_data['Node'])
+
+        if node in self.node_metadata:
+            metadata = self.node_metadata[node]
+        else:
+            metadata = {}
+            self.node_metadata[node] = metadata
+        if is_list:
+            self.push(metadata, key, value)
+        else:
+            metadata[key] = value
+
+    def get_inventory_name(self, node_data):
+        '''return the ip or a node name that can be looked up in consul's dns'''
+        domain = self.config.domain
+        if domain:
+            node_name = node_data['Node']
+            if self.current_dc:
+                return '%s.node.%s.%s' % ( node_name, self.current_dc, domain)
+            else:
+                return '%s.node.%s' % ( node_name, domain)
+        else:
+            return node_data['Address']
+
+    def add_node_to_map(self, map, name, node):
+        self.push(map, name, self.get_inventory_name(node))
+
+    def push(self, my_dict, key, element):
+        ''' Push an element onto an array that may not have been defined in the
+        dict '''
+        key = self.to_safe(key)
+        if key in my_dict:
+            my_dict[key].append(element)
+        else:
+            my_dict[key] = [element]
+
+    def to_safe(self, word):
+        ''' Converts 'bad' characters in a string to underscores so they can be used
+        as Ansible groups '''
+        return re.sub('[^A-Za-z0-9\-\.]', '_', word)
+
+    def sanitize_dict(self, d):
+
+        new_dict = {}
+        for k, v in d.items():
+            if v != None:
+                new_dict[self.to_safe(str(k))] = self.to_safe(str(v))
+        return new_dict
+
+    def sanitize_list(self, seq):
+        new_seq = []
+        for d in seq:
+            new_seq.append(self.sanitize_dict(d))
+        return new_seq
+
+
+class ConsulConfig(dict):
+
+    def __init__(self):
+        self.read_settings()
+        self.read_cli_args()
+
+    def has_config(self, name):
+        if hasattr(self, name):
+            return getattr(self, name)
+        else:
+            return False
+
+    def read_settings(self):
+        ''' Reads the settings from the consul.ini file '''
+        config = ConfigParser.SafeConfigParser()
+        config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul.ini')
+
+        config_options = ['host', 'token', 'datacenter', 'servers_suffix',
+                          'tags', 'kv_metadata', 'kv_groups', 'availability',
+                          'unavailable_suffix', 'available_suffix', 'url',
+                          'domain']
+        for option in config_options:
+            value = None
+            if config.has_option('consul', option):
+                value = config.get('consul', option)
+            setattr(self, option, value)
+
+    def read_cli_args(self):
+        ''' Command line argument processing '''
+        parser = argparse.ArgumentParser(description=
+          'Produce an Ansible Inventory file based on nodes in a Consul cluster')
+
+        parser.add_argument('--list', action='store_true',
+          help='Get all inventory variables from all nodes in the consul cluster')
+        parser.add_argument('--host', action='store',
+          help='Get all inventory variables about a specific consul node, \
+          requires datacenter set in consul.ini.')
+        parser.add_argument('--datacenter', action='store',
+          help='Get all inventory about a specific consul datacenter')
+
+        args = parser.parse_args()
+        arg_names = ['host', 'datacenter']
+
+        for arg in arg_names:
+            if getattr(args, arg):
+                setattr(self, arg, getattr(args, arg))
+
+    def get_availability_suffix(self, suffix, default):
+        if self.has_config(suffix):
+            return self.has_config(suffix)
+        return default
+
+    def get_consul_api(self):
+        '''get an instance of the api based on the supplied configuration'''
+        host = 'localhost'
+        port = 8500
+        token = None
+
+        if hasattr(self, 'url'):
+            from urlparse import urlparse
+            o = urlparse(self.url)
+            if o.hostname:
+                host = o.hostname
+            if o.port:
+                port = o.port
+
+        if hasattr(self, 'token'):
+            token = self.token
+            if not token:
+                token = 'anonymous'
+        return consul.Consul(host=host, port=port, token=token)
+
+ConsulInventory()
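To sanity-check the inventory this script produces, it can be invoked the same way Ansible would invoke it. A sketch; it assumes a reachable local agent and that the script sits next to its consul.ini at plugins/inventory/consul_io.py:

    import json
    import subprocess
    out = subprocess.check_output(['python', 'plugins/inventory/consul_io.py', '--list'])
    inv = json.loads(out)
    print sorted(g for g in inv if g != '_meta')   # group names
    print inv['_meta']['hostvars']                 # per-node metadata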
diff --git a/plugins/inventory/ec2.ini b/plugins/inventory/ec2.ini
index a0c8672394c..523a80ed833 100644
--- a/plugins/inventory/ec2.ini
+++ b/plugins/inventory/ec2.ini
@@ -24,14 +24,17 @@ regions_exclude = us-gov-west-1,cn-north-1
 # This is the normal destination variable to use. If you are running Ansible
 # from outside EC2, then 'public_dns_name' makes the most sense. If you are
 # running Ansible from within EC2, then perhaps you want to use the internal
-# address, and should set this to 'private_dns_name'.
+# address, and should set this to 'private_dns_name'. The key of an EC2 tag
+# may optionally be used; however the boto instance variables hold precedence
+# in the event of a collision.
 destination_variable = public_dns_name
 
 # For server inside a VPC, using DNS names may not make sense. When an instance
 # has 'subnet_id' set, this variable is used. If the subnet is public, setting
 # this to 'ip_address' will return the public IP address. For instances in a
 # private subnet, this should be set to 'private_ip_address', and Ansible must
-# be run from with EC2.
+# be run from within EC2. The key of an EC2 tag may optionally be used; however
+# the boto instance variables hold precedence in the event of a collision.
 vpc_destination_variable = ip_address
 
 # To tag instances on EC2 with the resource records that point to them from
@@ -68,8 +71,44 @@ cache_max_age = 300
 # Organize groups into a nested/hierarchy instead of a flat namespace.
 nested_groups = False
 
+# The EC2 inventory output can become very large. To manage its size,
+# configure which groups should be created.
+group_by_instance_id = True
+group_by_region = True
+group_by_availability_zone = True
+group_by_ami_id = True
+group_by_instance_type = True
+group_by_key_pair = True
+group_by_vpc_id = True
+group_by_security_group = True
+group_by_tag_keys = True
+group_by_tag_none = True
+group_by_route53_names = True
+group_by_rds_engine = True
+group_by_rds_parameter_group = True
+
 # If you only want to include hosts that match a certain regular expression
 # pattern_include = stage-*
 
 # If you want to exclude any hosts that match a certain regular expression
 # pattern_exclude = stage-*
+
+# Instance filters can be used to control which instances are retrieved for
+# inventory. For the full list of possible filters, please read the EC2 API
+# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
+# Filters are key/value pairs separated by '=', to list multiple filters use
+# a list separated by commas. See examples below.
+
+# Retrieve only instances with (key=value) env=stage tag
+# instance_filters = tag:env=stage
+
+# Retrieve only instances with role=webservers OR role=dbservers tag
+# instance_filters = tag:role=webservers,tag:role=dbservers
+
+# Retrieve only t1.micro instances OR instances with tag env=stage
+# instance_filters = instance-type=t1.micro,tag:env=stage
+
+# You can use wildcards in filter values also. Below will list instances whose
+# tag Name value matches webservers1*
+# (ex. webservers15, webservers1a, webservers123 etc)
+# instance_filters = tag:Name=webservers1*
diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py
index f4e98f6dd73..0f7c1985752 100755
--- a/plugins/inventory/ec2.py
+++ b/plugins/inventory/ec2.py
@@ -123,6 +123,7 @@ from boto import ec2
 from boto import rds
 from boto import route53
 import ConfigParser
+from collections import defaultdict
 
 try:
     import json
@@ -252,6 +253,28 @@ class Ec2Inventory(object):
         else:
             self.nested_groups = False
 
+        # Configure which groups should be created.
+        group_by_options = [
+            'group_by_instance_id',
+            'group_by_region',
+            'group_by_availability_zone',
+            'group_by_ami_id',
+            'group_by_instance_type',
+            'group_by_key_pair',
+            'group_by_vpc_id',
+            'group_by_security_group',
+            'group_by_tag_keys',
+            'group_by_tag_none',
+            'group_by_route53_names',
+            'group_by_rds_engine',
+            'group_by_rds_parameter_group',
+        ]
+        for option in group_by_options:
+            if config.has_option('ec2', option):
+                setattr(self, option, config.getboolean('ec2', option))
+            else:
+                setattr(self, option, True)
+
         # Do we need to just include hosts that match a pattern?
         try:
             pattern_include = config.get('ec2', 'pattern_include')
@@ -272,6 +295,18 @@ class Ec2Inventory(object):
         except ConfigParser.NoOptionError, e:
             self.pattern_exclude = None
 
+        # Instance filters (see boto and EC2 API docs). Ignore invalid filters.
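+        # For illustration (not part of the original patch): a setting such as
+        #   instance_filters = tag:env=stage,tag:role=webservers,tag:role=dbservers
+        # parses below to {'tag:env': ['stage'], 'tag:role': ['webservers', 'dbservers']};
+        # values that share a key are OR'd within a single DescribeInstances call,
+        # and each distinct key issues its own call whose reservations are combined.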
+ self.ec2_instance_filters = defaultdict(list) + if config.has_option('ec2', 'instance_filters'): + for instance_filter in config.get('ec2', 'instance_filters', '').split(','): + instance_filter = instance_filter.strip() + if not instance_filter or '=' not in instance_filter: + continue + filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] + if not filter_key: + continue + self.ec2_instance_filters[filter_key].append(filter_value) + def parse_cli_args(self): ''' Command line argument processing ''' @@ -316,7 +351,13 @@ class Ec2Inventory(object): print("region name: %s likely not supported, or AWS is down. connection to region failed." % region) sys.exit(1) - reservations = conn.get_all_instances() + reservations = [] + if self.ec2_instance_filters: + for filter_key, filter_values in self.ec2_instance_filters.iteritems(): + reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) + else: + reservations = conn.get_all_instances() + for reservation in reservations: for instance in reservation.instances: self.add_instance(instance, region) @@ -371,9 +412,13 @@ class Ec2Inventory(object): # Select the best destination address if instance.subnet_id: - dest = getattr(instance, self.vpc_destination_variable) + dest = getattr(instance, self.vpc_destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) else: - dest = getattr(instance, self.destination_variable) + dest = getattr(instance, self.destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.destination_variable, None) if not dest: # Skip instances we cannot address (e.g. private VPC subnet) @@ -391,62 +436,89 @@ class Ec2Inventory(object): self.index[dest] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) - self.inventory[instance.id] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', instance.id) + if self.group_by_instance_id: + self.inventory[instance.id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - else: + if self.group_by_region: self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone - self.push(self.inventory, instance.placement, dest) - if self.nested_groups: - self.push_group(self.inventory, region, instance.placement) + if self.group_by_availability_zone: + self.push(self.inventory, instance.placement, dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, instance.placement) + self.push_group(self.inventory, 'zones', instance.placement) + + # Inventory: Group by Amazon Machine Image (AMI) ID + if self.group_by_ami_id: + ami_id = self.to_safe(instance.image_id) + self.push(self.inventory, ami_id, dest) + if self.nested_groups: + self.push_group(self.inventory, 'images', ami_id) # Inventory: Group by instance type - type_name = self.to_safe('type_' + instance.instance_type) - self.push(self.inventory, type_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) + if self.group_by_instance_type: + type_name = self.to_safe('type_' + instance.instance_type) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) # 
Inventory: Group by key pair - if instance.key_name: + if self.group_by_key_pair and instance.key_name: key_name = self.to_safe('key_' + instance.key_name) self.push(self.inventory, key_name, dest) if self.nested_groups: self.push_group(self.inventory, 'keys', key_name) - + + # Inventory: Group by VPC + if self.group_by_vpc_id and instance.vpc_id: + vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) + self.push(self.inventory, vpc_id_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'vpcs', vpc_id_name) + # Inventory: Group by security group - try: - for group in instance.groups: - key = self.to_safe("security_group_" + group.name) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - except AttributeError: - print 'Package boto seems a bit older.' - print 'Please upgrade boto >= 2.3.0.' - sys.exit(1) + if self.group_by_security_group: + try: + for group in instance.groups: + key = self.to_safe("security_group_" + group.name) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + except AttributeError: + print 'Package boto seems a bit older.' + print 'Please upgrade boto >= 2.3.0.' + sys.exit(1) # Inventory: Group by tag keys - for k, v in instance.tags.iteritems(): - key = self.to_safe("tag_" + k + "=" + v) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) - self.push_group(self.inventory, self.to_safe("tag_" + k), key) + if self.group_by_tag_keys: + for k, v in instance.tags.iteritems(): + key = self.to_safe("tag_" + k + "=" + v) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) + self.push_group(self.inventory, self.to_safe("tag_" + k), key) # Inventory: Group by Route53 domain names if enabled - if self.route53_enabled: + if self.route53_enabled and self.group_by_route53_names: route53_names = self.get_instance_route53_names(instance) for name in route53_names: self.push(self.inventory, name, dest) if self.nested_groups: self.push_group(self.inventory, 'route53', name) + # Global Tag: instances without tags + if self.group_by_tag_none and len(instance.tags) == 0: + self.push(self.inventory, 'tag_none', dest) + if self.nested_groups: + self.push_group(self.inventory, 'tags', 'tag_none') + # Global Tag: tag all EC2 instances self.push(self.inventory, 'ec2', dest) @@ -462,10 +534,6 @@ class Ec2Inventory(object): return # Select the best destination address - #if instance.subnet_id: - #dest = getattr(instance, self.vpc_destination_variable) - #else: - #dest = getattr(instance, self.destination_variable) dest = instance.endpoint[0] if not dest: @@ -476,49 +544,64 @@ class Ec2Inventory(object): self.index[dest] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) - self.inventory[instance.id] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', instance.id) + if self.group_by_instance_id: + self.inventory[instance.id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - else: + if self.group_by_region: self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone - 
self.push(self.inventory, instance.availability_zone, dest) - if self.nested_groups: - self.push_group(self.inventory, region, instance.availability_zone) - - # Inventory: Group by instance type - type_name = self.to_safe('type_' + instance.instance_class) - self.push(self.inventory, type_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by security group - try: - if instance.security_group: - key = self.to_safe("security_group_" + instance.security_group.name) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) + if self.group_by_availability_zone: + self.push(self.inventory, instance.availability_zone, dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, instance.availability_zone) + self.push_group(self.inventory, 'zones', instance.availability_zone) - except AttributeError: - print 'Package boto seems a bit older.' - print 'Please upgrade boto >= 2.3.0.' - sys.exit(1) + # Inventory: Group by instance type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + instance.instance_class) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC + if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: + vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) + self.push(self.inventory, vpc_id_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + try: + if instance.security_group: + key = self.to_safe("security_group_" + instance.security_group.name) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + except AttributeError: + print 'Package boto seems a bit older.' + print 'Please upgrade boto >= 2.3.0.' 
+ sys.exit(1) # Inventory: Group by engine - self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) - if self.nested_groups: - self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) + if self.group_by_rds_engine: + self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) + if self.nested_groups: + self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) # Inventory: Group by parameter group - self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) - if self.nested_groups: - self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) + if self.group_by_rds_parameter_group: + self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) + if self.nested_groups: + self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) # Global Tag: all RDS instances self.push(self.inventory, 'rds', dest) @@ -608,8 +691,8 @@ class Ec2Inventory(object): for group in value: group_ids.append(group.id) group_names.append(group.name) - instance_vars["ec2_security_group_ids"] = ','.join(group_ids) - instance_vars["ec2_security_group_names"] = ','.join(group_names) + instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) + instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) else: pass # TODO Product codes if someone finds them useful @@ -630,7 +713,7 @@ class Ec2Inventory(object): # try updating the cache self.do_api_calls_update_cache() if not self.args.host in self.index: - # host migh not exist anymore + # host might not exist anymore return self.json_format_dict({}, True) (region, instance_id) = self.index[self.args.host] diff --git a/plugins/inventory/freeipa.py b/plugins/inventory/freeipa.py new file mode 100755 index 00000000000..caf336239cc --- /dev/null +++ b/plugins/inventory/freeipa.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python + +import argparse +from ipalib import api +import json + +def initialize(): + ''' + This function initializes the FreeIPA/IPA API. This function requires + no arguments. A kerberos key must be present in the users keyring in + order for this to work. + ''' + + api.bootstrap(context='cli') + api.finalize() + api.Backend.xmlclient.connect() + + return api + +def list_groups(api): + ''' + This function returns a list of all host groups. This function requires + one argument, the FreeIPA/IPA API object. + ''' + + inventory = {} + hostvars={} + meta={} + + result = api.Command.hostgroup_find()['result'] + + for hostgroup in result: + inventory[hostgroup['cn'][0]] = { 'hosts': [host for host in hostgroup['member_host']]} + + for host in hostgroup['member_host']: + hostvars[host] = {} + + inventory['_meta'] = {'hostvars': hostvars} + inv_string = json.dumps(inventory, indent=1, sort_keys=True) + print inv_string + + return None + +def parse_args(): + ''' + This function parses the arguments that were passed in via the command line. + This function expects no arguments. 
+ ''' + + parser = argparse.ArgumentParser(description='Ansible FreeIPA/IPA ' + 'inventory module') + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('--list', action='store_true', + help='List active servers') + group.add_argument('--host', help='List details about the specified host') + + return parser.parse_args() + +def print_host(host): + ''' + This function is really a stub, it could return variables to be used in + a playbook. However, at this point there are no variables stored in + FreeIPA/IPA. + + This function expects one string, this hostname to lookup variables for. + ''' + + print json.dumps({}) + + return None + +if __name__ == '__main__': + args = parse_args() + + if args.host: + print_host(args.host) + elif args.list: + api = initialize() + list_groups(api) diff --git a/plugins/inventory/gce.py b/plugins/inventory/gce.py index c8eeb43ab1b..e77178c16b3 100755 --- a/plugins/inventory/gce.py +++ b/plugins/inventory/gce.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # Copyright 2013 Google Inc. # # This file is part of Ansible @@ -103,11 +103,13 @@ class GceInventory(object): # Just display data for specific host if self.args.host: print self.json_format_dict(self.node_to_dict( - self.get_instance(self.args.host))) + self.get_instance(self.args.host)), + pretty=self.args.pretty) sys.exit(0) # Otherwise, assume user wants all instances grouped - print(self.json_format_dict(self.group_instances())) + print(self.json_format_dict(self.group_instances(), + pretty=self.args.pretty)) sys.exit(0) def get_gce_driver(self): @@ -187,6 +189,8 @@ class GceInventory(object): help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all information about an instance') + parser.add_argument('--pretty', action='store_true', default=False, + help='Pretty format (default: False)') self.args = parser.parse_args() @@ -229,9 +233,14 @@ class GceInventory(object): def group_instances(self): '''Group all instances''' groups = {} + meta = {} + meta["hostvars"] = {} + for node in self.driver.list_nodes(): name = node.name + meta["hostvars"][name] = self.node_to_dict(node) + zone = node.extra['zone'].name if groups.has_key(zone): groups[zone].append(name) else: groups[zone] = [name] @@ -259,6 +268,9 @@ class GceInventory(object): stat = 'status_%s' % status.lower() if groups.has_key(stat): groups[stat].append(name) else: groups[stat] = [name] + + groups["_meta"] = meta + return groups def json_format_dict(self, data, pretty=False): diff --git a/plugins/inventory/rax.ini b/plugins/inventory/rax.ini new file mode 100644 index 00000000000..5a269e16a3a --- /dev/null +++ b/plugins/inventory/rax.ini @@ -0,0 +1,57 @@ +# Ansible Rackspace external inventory script settings +# + +[rax] + +# Environment Variable: RAX_CREDS_FILE +# +# An optional configuration that points to a pyrax-compatible credentials +# file. +# +# If not supplied, rax.py will look for a credentials file +# at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK, +# and therefore requires a file formatted per the SDK's specifications. +# +# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md +# creds_file = ~/.rackspace_cloud_credentials + +# Environment Variable: RAX_REGION +# +# An optional environment variable to narrow inventory search +# scope. If used, needs a value like ORD, DFW, SYD (a Rackspace +# datacenter) and optionally accepts a comma-separated list. 
+# regions = IAD,ORD,DFW + +# Environment Variable: RAX_ENV +# +# A configuration that will use an environment as configured in +# ~/.pyrax.cfg, see +# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md +# env = prod + +# Environment Variable: RAX_META_PREFIX +# Default: meta +# +# A configuration that changes the prefix used for meta key/value groups. +# For compatibility with ec2.py set to "tag" +# meta_prefix = meta + +# Environment Variable: RAX_ACCESS_NETWORK +# Default: public +# +# A configuration that will tell the inventory script to use a specific +# server network to determine the ansible_ssh_host value. If no address +# is found, ansible_ssh_host will not be set. Accepts a comma-separated +# list of network names, the first found wins. +# access_network = public + +# Environment Variable: RAX_ACCESS_IP_VERSION +# Default: 4 +# +# A configuration related to "access_network" that will attempt to +# determine the ansible_ssh_host value for either IPv4 or IPv6. If no +# address is found, ansible_ssh_host will not be set. +# Acceptable values are: 4 or 6. Values other than 4 or 6 +# will be ignored, and 4 will be used. Accepts a comma separated list, +# the first found wins. +# access_ip_version = 4 diff --git a/plugins/inventory/rax.py b/plugins/inventory/rax.py old mode 100755 new mode 100644 index 457c20962a6..10b72d322bf --- a/plugins/inventory/rax.py +++ b/plugins/inventory/rax.py @@ -1,8 +1,10 @@ #!/usr/bin/env python -# (c) 2013, Jesse Keating +# (c) 2013, Jesse Keating , +# Matt Martz # -# This file is part of Ansible, +# This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -17,16 +19,20 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -DOCUMENTATION = ''' ---- -inventory: rax -short_description: Rackspace Public Cloud external inventory script -description: - - Generates inventory that Ansible can understand by making API request to +""" +Rackspace Cloud Inventory + +Authors: + Jesse Keating , + Matt Martz + + +Description: + Generates inventory that Ansible can understand by making API request to Rackspace Public Cloud API - - | - When run against a specific host, this script returns the following - variables: + + When run against a specific host, this script returns variables similar to: rax_os-ext-sts_task_state rax_addresses rax_links @@ -50,72 +56,131 @@ description: rax_tenant_id rax_loaded - where some item can have nested structure. - - credentials are set in a credentials file -version_added: None -options: - creds_file: - description: - - File to find the Rackspace Public Cloud credentials in - required: true - default: null - region: - description: - - An optional value to narrow inventory scope, i.e. DFW, ORD, IAD, LON - required: false - default: null -authors: - - Jesse Keating - - Paul Durivage - - Matt Martz -notes: - - RAX_CREDS_FILE is an optional environment variable that points to a - pyrax-compatible credentials file. - - If RAX_CREDS_FILE is not supplied, rax.py will look for a credentials file - at ~/.rackspace_cloud_credentials. 
- - See https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating - - RAX_REGION is an optional environment variable to narrow inventory search - scope - - RAX_REGION, if used, needs a value like ORD, DFW, SYD (a Rackspace - datacenter) and optionally accepts a comma-separated list - - RAX_ENV is an environment variable that will use an environment as - configured in ~/.pyrax.cfg, see - https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration - - RAX_META_PREFIX is an environment variable that changes the prefix used - for meta key/value groups. For compatibility with ec2.py set to - RAX_META_PREFIX=tag -requirements: [ "pyrax" ] -examples: - - description: List server instances - code: RAX_CREDS_FILE=~/.raxpub rax.py --list - - description: List servers in ORD datacenter only - code: RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list - - description: List servers in ORD and DFW datacenters - code: RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list - - description: Get server details for server named "server.example.com" - code: RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com -''' +Configuration: + rax.py can be configured using a rax.ini file or via environment + variables. The rax.ini file should live in the same directory along side + this script. + + The section header for configuration values related to this + inventory plugin is [rax] + + [rax] + creds_file = ~/.rackspace_cloud_credentials + regions = IAD,ORD,DFW + env = prod + meta_prefix = meta + access_network = public + access_ip_version = 4 + + Each of these configurations also has a corresponding environment variable. + An environment variable will override a configuration file value. + + creds_file: + Environment Variable: RAX_CREDS_FILE + + An optional configuration that points to a pyrax-compatible credentials + file. + + If not supplied, rax.py will look for a credentials file + at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK, + and therefore requires a file formatted per the SDK's specifications. + + https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md + + regions: + Environment Variable: RAX_REGION + + An optional environment variable to narrow inventory search + scope. If used, needs a value like ORD, DFW, SYD (a Rackspace + datacenter) and optionally accepts a comma-separated list. + + environment: + Environment Variable: RAX_ENV + + A configuration that will use an environment as configured in + ~/.pyrax.cfg, see + https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md + + meta_prefix: + Environment Variable: RAX_META_PREFIX + Default: meta + + A configuration that changes the prefix used for meta key/value groups. + For compatibility with ec2.py set to "tag" + + access_network: + Environment Variable: RAX_ACCESS_NETWORK + Default: public + + A configuration that will tell the inventory script to use a specific + server network to determine the ansible_ssh_host value. If no address + is found, ansible_ssh_host will not be set. Accepts a comma-separated + list of network names, the first found wins. + + access_ip_version: + Environment Variable: RAX_ACCESS_IP_VERSION + Default: 4 + + A configuration related to "access_network" that will attempt to + determine the ansible_ssh_host value for either IPv4 or IPv6. If no + address is found, ansible_ssh_host will not be set. + Acceptable values are: 4 or 6. Values other than 4 or 6 + will be ignored, and 4 will be used. 
Accepts a comma-separated list, + the first found wins. + +Examples: + List server instances + $ RAX_CREDS_FILE=~/.raxpub rax.py --list + + List servers in ORD datacenter only + $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list + + List servers in ORD and DFW datacenters + $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list + + Get server details for server named "server.example.com" + $ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com + + Use the instance private IP to connect (instead of public IP) + $ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list +""" import os import re import sys import argparse +import warnings import collections +import ConfigParser -from types import NoneType +from ansible.constants import get_config, mk_boolean try: import json -except: +except ImportError: import simplejson as json try: import pyrax + from pyrax.utils import slugify except ImportError: print('pyrax is required for this module') sys.exit(1) -NON_CALLABLES = (basestring, bool, dict, int, list, NoneType) +NON_CALLABLES = (basestring, bool, dict, int, list, type(None)) + + +def load_config_file(): + p = ConfigParser.ConfigParser() + config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'rax.ini') + try: + p.read(config_file) + except ConfigParser.Error: + return None + else: + return p +p = load_config_file() def rax_slugify(value): @@ -126,7 +191,7 @@ def to_dict(obj): instance = {} for key in dir(obj): value = getattr(obj, key) - if (isinstance(value, NON_CALLABLES) and not key.startswith('_')): + if isinstance(value, NON_CALLABLES) and not key.startswith('_'): key = rax_slugify(key) instance[key] = value @@ -153,11 +218,33 @@ def _list(regions): groups = collections.defaultdict(list) hostvars = collections.defaultdict(dict) images = {} + cbs_attachments = collections.defaultdict(dict) + + prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta') + + networks = get_config(p, 'rax', 'access_network', 'RAX_ACCESS_NETWORK', + 'public', islist=True) + try: + ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', + 'RAX_ACCESS_IP_VERSION', 4, + islist=True)) + except: + ip_versions = [4] + else: + ip_versions = [v for v in ip_versions if v in [4, 6]] + if not ip_versions: + ip_versions = [4] # Go through all the regions looking for servers for region in regions: # Connect to the region cs = pyrax.connect_to_cloudservers(region=region) + if cs is None: + warnings.warn( + 'Connecting to Rackspace region "%s" has caused Pyrax to ' + 'return a NoneType. Is this a valid region?' 
% region, + RuntimeWarning) + continue for server in cs.servers.list(): # Create a group on region groups[region].append(server.name) @@ -178,11 +265,33 @@ def _list(regions): hostvars[server.name]['rax_region'] = region for key, value in server.metadata.iteritems(): - prefix = os.getenv('RAX_META_PREFIX', 'meta') groups['%s_%s_%s' % (prefix, key, value)].append(server.name) groups['instance-%s' % server.id].append(server.name) groups['flavor-%s' % server.flavor['id']].append(server.name) + + # Handle boot from volume + if not server.image: + if not cbs_attachments[region]: + cbs = pyrax.connect_to_cloud_blockstorage(region) + for vol in cbs.list(): + if mk_boolean(vol.bootable): + for attachment in vol.attachments: + metadata = vol.volume_image_metadata + server_id = attachment['server_id'] + cbs_attachments[region][server_id] = { + 'id': metadata['image_id'], + 'name': slugify(metadata['image_name']) + } + image = cbs_attachments[region].get(server.id) + if image: + server.image = {'id': image['id']} + hostvars[server.name]['rax_image'] = server.image + hostvars[server.name]['rax_boot_source'] = 'volume' + images[image['id']] = image['name'] + else: + hostvars[server.name]['rax_boot_source'] = 'local' + try: imagegroup = 'image-%s' % images[server.image['id']] groups[imagegroup].append(server.name) @@ -198,7 +307,30 @@ def _list(regions): groups['image-%s' % server.image['id']].append(server.name) # And finally, add an IP address - hostvars[server.name]['ansible_ssh_host'] = server.accessIPv4 + ansible_ssh_host = None + # use accessIPv[46] instead of looping address for 'public' + for network_name in networks: + if ansible_ssh_host: + break + if network_name == 'public': + for version_name in ip_versions: + if ansible_ssh_host: + break + if version_name == 6 and server.accessIPv6: + ansible_ssh_host = server.accessIPv6 + elif server.accessIPv4: + ansible_ssh_host = server.accessIPv4 + if not ansible_ssh_host: + addresses = server.addresses.get(network_name, []) + for address in addresses: + for version_name in ip_versions: + if ansible_ssh_host: + break + if address.get('version') == version_name: + ansible_ssh_host = address.get('addr') + break + if ansible_ssh_host: + hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host if hostvars: groups['_meta'] = {'hostvars': hostvars} @@ -218,16 +350,18 @@ def parse_args(): def setup(): default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials') - env = os.getenv('RAX_ENV', None) + env = get_config(p, 'rax', 'environment', 'RAX_ENV', None) if env: pyrax.set_environment(env) keyring_username = pyrax.get_setting('keyring_username') # Attempt to grab credentials from environment first - try: - creds_file = os.path.expanduser(os.environ['RAX_CREDS_FILE']) - except KeyError, e: + creds_file = get_config(p, 'rax', 'creds_file', + 'RAX_CREDS_FILE', None) + if creds_file is not None: + creds_file = os.path.expanduser(creds_file) + else: # But if that fails, use the default location of # ~/.rackspace_cloud_credentials if os.path.isfile(default_creds_file): @@ -235,7 +369,7 @@ def setup(): elif not keyring_username: sys.stderr.write('No value in environment variable %s and/or no ' 'credentials file at %s\n' - % (e.message, default_creds_file)) + % ('RAX_CREDS_FILE', default_creds_file)) sys.exit(1) identity_type = pyrax.get_setting('identity_type') @@ -256,7 +390,9 @@ def setup(): if region: regions.append(region) else: - for region in os.getenv('RAX_REGION', 'all').split(','): + region_list = get_config(p, 'rax', 'regions', 
'RAX_REGION', 'all', + islist=True) + for region in region_list: region = region.strip().upper() if region == 'ALL': regions = pyrax.regions diff --git a/plugins/inventory/vbox.py b/plugins/inventory/vbox.py new file mode 100755 index 00000000000..ff31785d7e3 --- /dev/null +++ b/plugins/inventory/vbox.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python + +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +import sys +from subprocess import Popen,PIPE + +try: + import json +except ImportError: + import simplejson as json + + +VBOX="VBoxManage" + + +def get_hosts(host=None): + + returned = {} + try: + if host: + p = Popen([VBOX, 'showvminfo', host], stdout=PIPE) + else: + returned = { 'all': set(), '_metadata': {} } + p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE) + except: + sys.exit(1) + + hostvars = {} + prevkey = pref_k = '' + + for line in p.stdout.readlines(): + + try: + k,v = line.split(':',1) + except: + continue + + if k == '': + continue + + v = v.strip() + if k.startswith('Name'): + if v not in hostvars: + curname = v + hostvars[curname] = {} + try: # try to get network info + x = Popen([VBOX, 'guestproperty', 'get', curname,"/VirtualBox/GuestInfo/Net/0/V4/IP"],stdout=PIPE) + ipinfo = x.stdout.read() + if 'Value' in ipinfo: + a,ip = ipinfo.split(':',1) + hostvars[curname]['ansible_ssh_host'] = ip.strip() + except: + pass + + continue + + if not host: + if k == 'Groups': + for group in v.split('/'): + if group: + if group not in returned: + returned[group] = set() + returned[group].add(curname) + returned['all'].add(curname) + continue + + pref_k = 'vbox_' + k.strip().replace(' ','_') + if k.startswith(' '): + if prevkey not in hostvars[curname]: + hostvars[curname][prevkey] = {} + hostvars[curname][prevkey][pref_k]= v + else: + if v != '': + hostvars[curname][pref_k] = v + + prevkey = pref_k + + if not host: + returned['_metadata']['hostvars'] = hostvars + else: + returned = hostvars[host] + return returned + + +if __name__ == '__main__': + + inventory = {} + hostname = None + + if len(sys.argv) > 1: + if sys.argv[1] == "--host": + hostname = sys.argv[2] + + if hostname: + inventory = get_hosts(hostname) + else: + inventory = get_hosts() + + import pprint + pprint.pprint(inventory) diff --git a/plugins/inventory/vmware.ini b/plugins/inventory/vmware.ini index aea3591860e..964be18c14e 100644 --- a/plugins/inventory/vmware.ini +++ b/plugins/inventory/vmware.ini @@ -1,15 +1,39 @@ -# Ansible vmware external inventory script settings -# -[defaults] -guests_only = True -#vm_group = -#hw_group = +# Ansible VMware external inventory script settings -[cache] -max_age = 3600 -dir = ~/.cache/ansible +[defaults] + +# If true (the default), return only guest VMs. If false, also return host +# systems in the results. +guests_only = True + +# Specify an alternate group name for guest VMs. If not defined, defaults to +# the basename of the inventory script + "_vm", e.g. "vmware_vm".
+#vm_group = vm_group_name + +# Specify an alternate group name for host systems when guests_only=false. +# If not defined, defaults to the basename of the inventory script + "_hw", +# e.g. "vmware_hw". +#hw_group = hw_group_name + +# Specify the number of seconds to use the inventory cache before it is +# considered stale. If not defined, defaults to 0 seconds. +#cache_max_age = 3600 + +# Specify the directory used for storing the inventory cache. If not defined, +# caching will be disabled. +#cache_dir = ~/.cache/ansible [auth] + +# Specify hostname or IP address of vCenter/ESXi server. A port may be +# included with the hostname, e.g.: vcenter.example.com:8443. This setting +# may also be defined via the VMWARE_HOST environment variable. host = vcenter.example.com + +# Specify a username to access the vCenter host. This setting may also be +# defined with the VMWARE_USER environment variable. user = ihasaccess + +# Specify a password to access the vCenter host. This setting may also be +# defined with the VMWARE_PASSWORD environment variable. password = ssshverysecret diff --git a/plugins/inventory/vmware.py b/plugins/inventory/vmware.py index dad54054980..92030d66e56 100755 --- a/plugins/inventory/vmware.py +++ b/plugins/inventory/vmware.py @@ -1,205 +1,424 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- ''' -VMWARE external inventory script -================================= +VMware Inventory Script +======================= -shamelessly copied from existing inventory scripts. +Retrieve information about virtual machines from a vCenter server or +standalone ESX host. When `guests_only=false` (in the INI file), host systems +are also returned in addition to VMs. -This script and it's ini can be used more than once, +This script will attempt to read configuration from an INI file with the same +base filename if present, or `vmware.ini` if not. It is possible to create +symlinks to the inventory script to support multiple configurations, e.g.: -i.e. vmware.py/vmware_colo.ini vmware_idf.py/vmware_idf.ini -(script can be link) +* `vmware.py` (this script) +* `vmware.ini` (default configuration, will be read by `vmware.py`) +* `vmware_test.py` (symlink to `vmware.py`) +* `vmware_test.ini` (test configuration, will be read by `vmware_test.py`) +* `vmware_other.py` (symlink to `vmware.py`, will read `vmware.ini` since no + `vmware_other.ini` exists) -so if you don't have clustered vcenter but multiple esx machines or -just diff clusters you can have an inventory per each and automatically -group hosts based on file name or specify a group in the ini. +The path to an INI file may also be specified via the `VMWARE_INI` environment +variable, in which case the filename matching rules above will not apply. -You can also use _HOST|USER|PASSWORD environment variables -to override the ini. +Host and authentication parameters may be specified via the `VMWARE_HOST`, +`VMWARE_USER` and `VMWARE_PASSWORD` environment variables; these options will +take precedence over options present in the INI file. An INI file is not +required if these options are specified using environment variables. ''' +import collections +import json +import logging +import optparse import os import sys import time import ConfigParser -from psphere.client import Client -from psphere.managedobjects import HostSystem +# Disable logging messages triggered by pSphere/suds.
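The file-resolution rules in the docstring above amount to only a few lines; a minimal standalone sketch (candidate_config_files is a hypothetical name, mirroring the constructor logic that follows):

    import os
    import sys

    def candidate_config_files():
        # $VMWARE_INI, when set, wins outright and skips basename matching.
        if os.environ.get('VMWARE_INI', ''):
            return [os.environ['VMWARE_INI']]
        # Otherwise try '<scriptname>.ini' first, then fall back to 'vmware.ini'.
        base = os.path.abspath(sys.argv[0]).rstrip('.py')
        return [base + '.ini', 'vmware.ini']

One caveat worth noting: rstrip('.py') strips a set of characters rather than a suffix, so a script named, say, happy.py would look for ha.ini; os.path.splitext would be the safer idiom, but the sketch keeps the module's own. A typical override at run time would be VMWARE_INI=/etc/ansible/vmware_prod.ini ./vmware.py --list.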
try: - import json + from logging import NullHandler except ImportError: - import simplejson as json + from logging import Handler + class NullHandler(Handler): + def emit(self, record): + pass +logging.getLogger('psphere').addHandler(NullHandler()) +logging.getLogger('suds').addHandler(NullHandler()) + +from psphere.client import Client +from psphere.errors import ObjectNotFoundError +from psphere.managedobjects import HostSystem, VirtualMachine, ManagedObject, Network +from suds.sudsobject import Object as SudsObject -def save_cache(cache_item, data, config): - ''' saves item to cache ''' +class VMwareInventory(object): + + def __init__(self, guests_only=None): + self.config = ConfigParser.SafeConfigParser() + if os.environ.get('VMWARE_INI', ''): + config_files = [os.environ['VMWARE_INI']] + else: + config_files = [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'vmware.ini'] + for config_file in config_files: + if os.path.exists(config_file): + self.config.read(config_file) + break - if config.has_option('cache', 'dir'): - dpath = os.path.expanduser(config.get('cache', 'dir')) + # Retrieve only guest VMs, or include host systems? + if guests_only is not None: + self.guests_only = guests_only + elif self.config.has_option('defaults', 'guests_only'): + self.guests_only = self.config.getboolean('defaults', 'guests_only') + else: + self.guests_only = True + + # Read authentication information from VMware environment variables + # (if set), otherwise from INI file. + auth_host = os.environ.get('VMWARE_HOST') + if not auth_host and self.config.has_option('auth', 'host'): + auth_host = self.config.get('auth', 'host') + auth_user = os.environ.get('VMWARE_USER') + if not auth_user and self.config.has_option('auth', 'user'): + auth_user = self.config.get('auth', 'user') + auth_password = os.environ.get('VMWARE_PASSWORD') + if not auth_password and self.config.has_option('auth', 'password'): + auth_password = self.config.get('auth', 'password') + + # Create the VMware client connection. + self.client = Client(auth_host, auth_user, auth_password) + + def _put_cache(self, name, value): + ''' + Saves the value to cache with the name given. + ''' + if self.config.has_option('defaults', 'cache_dir'): + cache_dir = self.config.get('defaults', 'cache_dir') + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + cache_file = os.path.join(cache_dir, name) + with open(cache_file, 'w') as cache: + json.dump(value, cache) + + def _get_cache(self, name, default=None): + ''' + Retrieves the value from cache for the given name. + ''' + if self.config.has_option('defaults', 'cache_dir'): + cache_dir = self.config.get('defaults', 'cache_dir') + cache_file = os.path.join(cache_dir, name) + if os.path.exists(cache_file): + if self.config.has_option('defaults', 'cache_max_age'): + cache_max_age = self.config.getint('defaults', 'cache_max_age') + else: + cache_max_age = 0 + cache_stat = os.stat(cache_file) + if (cache_stat.st_mtime + cache_max_age) < time.time(): + with open(cache_file) as cache: + return json.load(cache) + return default + + def _flatten_dict(self, d, parent_key='', sep='_'): + ''' + Flatten nested dicts by combining keys with a separator. Lists with + only string items are included as is; any other lists are discarded. 
+ ''' + items = [] + for k, v in d.items(): + if k.startswith('_'): + continue + new_key = parent_key + sep + k if parent_key else k + if isinstance(v, collections.MutableMapping): + items.extend(self._flatten_dict(v, new_key, sep).items()) + elif isinstance(v, (list, tuple)): + if all([isinstance(x, basestring) for x in v]): + items.append((new_key, v)) + else: + items.append((new_key, v)) + return dict(items) + + def _get_obj_info(self, obj, depth=99, seen=None): + ''' + Recursively build a data structure for the given pSphere object (depth + only applies to ManagedObject instances). + ''' + seen = seen or set() + if isinstance(obj, ManagedObject): + try: + obj_unicode = unicode(getattr(obj, 'name')) + except AttributeError: + obj_unicode = () + if obj in seen: + return obj_unicode + seen.add(obj) + if depth <= 0: + return obj_unicode + d = {} + for attr in dir(obj): + if attr.startswith('_'): + continue + try: + val = getattr(obj, attr) + obj_info = self._get_obj_info(val, depth - 1, seen) + if obj_info != (): + d[attr] = obj_info + except Exception, e: + pass + return d + elif isinstance(obj, SudsObject): + d = {} + for key, val in iter(obj): + obj_info = self._get_obj_info(val, depth, seen) + if obj_info != (): + d[key] = obj_info + return d + elif isinstance(obj, (list, tuple)): + l = [] + for val in iter(obj): + obj_info = self._get_obj_info(val, depth, seen) + if obj_info != (): + l.append(obj_info) + return l + elif isinstance(obj, (type(None), bool, int, long, float, basestring)): + return obj + else: + return () + + def _get_host_info(self, host, prefix='vmware'): + ''' + Return a flattened dict with info about the given host system. + ''' + host_info = { + 'name': host.name, + } + for attr in ('datastore', 'network', 'vm'): + try: + value = getattr(host, attr) + host_info['%ss' % attr] = self._get_obj_info(value, depth=0) + except AttributeError: + host_info['%ss' % attr] = [] + for k, v in self._get_obj_info(host.summary, depth=0).items(): + if isinstance(v, collections.MutableMapping): + for k2, v2 in v.items(): + host_info[k2] = v2 + elif k != 'host': + host_info[k] = v try: - if not os.path.exists(dpath): - os.makedirs(dpath) - if os.path.isdir(dpath): - cache = open('/'.join([dpath,cache_item]), 'w') - cache.write(json.dumps(data)) - cache.close() - except IOError, e: - pass # not really sure what to do here + host_info['ipAddress'] = host.config.network.vnic[0].spec.ip.ipAddress + except Exception, e: + print >> sys.stderr, e + host_info = self._flatten_dict(host_info, prefix) + if ('%s_ipAddress' % prefix) in host_info: + host_info['ansible_ssh_host'] = host_info['%s_ipAddress' % prefix] + return host_info - -def get_cache(cache_item, config): - ''' returns cached item ''' - - inv = {} - if config.has_option('cache', 'dir'): - dpath = os.path.expanduser(config.get('cache', 'dir')) + def _get_vm_info(self, vm, prefix='vmware'): + ''' + Return a flattened dict with info about the given virtual machine. 
+ ''' + vm_info = { + 'name': vm.name, + } + for attr in ('datastore', 'network'): + try: + value = getattr(vm, attr) + vm_info['%ss' % attr] = self._get_obj_info(value, depth=0) + except AttributeError: + vm_info['%ss' % attr] = [] try: - cache = open('/'.join([dpath,cache_item]), 'r') - inv = json.loads(cache.read()) - cache.close() - except IOError, e: - pass # not really sure what to do here - - return inv - -def cache_available(cache_item, config): - ''' checks if we have a 'fresh' cache available for item requested ''' - - if config.has_option('cache', 'dir'): - dpath = os.path.expanduser(config.get('cache', 'dir')) - + vm_info['resourcePool'] = self._get_obj_info(vm.resourcePool, depth=0) + except AttributeError: + vm_info['resourcePool'] = '' try: - existing = os.stat('/'.join([dpath,cache_item])) - except: - # cache doesn't exist or isn't accessible - return False + vm_info['guestState'] = vm.guest.guestState + except AttributeError: + vm_info['guestState'] = '' + for k, v in self._get_obj_info(vm.summary, depth=0).items(): + if isinstance(v, collections.MutableMapping): + for k2, v2 in v.items(): + if k2 == 'host': + k2 = 'hostSystem' + vm_info[k2] = v2 + elif k != 'vm': + vm_info[k] = v + vm_info = self._flatten_dict(vm_info, prefix) + if ('%s_ipAddress' % prefix) in vm_info: + vm_info['ansible_ssh_host'] = vm_info['%s_ipAddress' % prefix] + return vm_info - if config.has_option('cache', 'max_age'): - maxage = config.get('cache', 'max_age') - fileage = int( time.time() - existing.st_mtime ) - if (maxage > fileage): - return True + def _add_host(self, inv, parent_group, host_name): + ''' + Add the host to the parent group in the given inventory. + ''' + p_group = inv.setdefault(parent_group, []) + if isinstance(p_group, dict): + group_hosts = p_group.setdefault('hosts', []) + else: + group_hosts = p_group + if host_name not in group_hosts: + group_hosts.append(host_name) - return False + def _add_child(self, inv, parent_group, child_group): + ''' + Add a child group to a parent group in the given inventory. + ''' + if parent_group != 'all': + p_group = inv.setdefault(parent_group, {}) + if not isinstance(p_group, dict): + inv[parent_group] = {'hosts': p_group} + p_group = inv[parent_group] + group_children = p_group.setdefault('children', []) + if child_group not in group_children: + group_children.append(child_group) + inv.setdefault(child_group, []) -def get_host_info(host): - ''' Get variables about a specific host ''' + def get_inventory(self, meta_hostvars=True): + ''' + Reads the inventory from cache or VMware API via pSphere. + ''' + # Use different cache names for guests only vs. all hosts. 
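Because the two helpers added above tolerate both group shapes, a group can start life as a bare host list and be promoted to a {'hosts': [...], 'children': [...]} dict the first time it gains a child. A standalone sketch of their observable behaviour (de-classed copies; names and sample values are for illustration only):

    def add_host(inv, parent_group, host_name):
        # Mirror of _add_host: a group is either a plain host list or a
        # dict carrying a 'hosts' key.
        p_group = inv.setdefault(parent_group, [])
        hosts = p_group.setdefault('hosts', []) if isinstance(p_group, dict) else p_group
        if host_name not in hosts:
            hosts.append(host_name)

    def add_child(inv, parent_group, child_group):
        # Mirror of _add_child: promote a plain list group to a dict
        # before attaching children; 'all' never gets children.
        if parent_group != 'all':
            p_group = inv.setdefault(parent_group, {})
            if not isinstance(p_group, dict):
                inv[parent_group] = {'hosts': p_group}
                p_group = inv[parent_group]
            children = p_group.setdefault('children', [])
            if child_group not in children:
                children.append(child_group)
            inv.setdefault(child_group, [])

    inv = {'all': {'hosts': []}}
    add_host(inv, 'all', 'vm01')
    add_host(inv, 'datastore1', 'vm01')
    add_child(inv, 'datastore1', 'ds_cluster')
    # inv == {'all': {'hosts': ['vm01']},
    #         'datastore1': {'hosts': ['vm01'], 'children': ['ds_cluster']},
    #         'ds_cluster': []}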
+ if self.guests_only: + cache_name = '__inventory_guests__' + else: + cache_name = '__inventory_all__' - hostinfo = { - 'vmware_name' : host.name, - } - for k in host.capability.__dict__.keys(): - if k.startswith('_'): - continue - try: - hostinfo['vmware_' + k] = str(host.capability[k]) - except: - continue + inv = self._get_cache(cache_name, None) + if inv is not None: + return inv - return hostinfo + inv = {'all': {'hosts': []}} + if meta_hostvars: + inv['_meta'] = {'hostvars': {}} - -def get_inventory(client, config): - ''' Reads the inventory from cache or vmware api ''' - - inv = {} - - if cache_available('inventory', config): - inv = get_cache('inventory',config) - elif client: - inv= { 'all': {'hosts': []}, '_meta': { 'hostvars': {} } } default_group = os.path.basename(sys.argv[0]).rstrip('.py') - if config.has_option('defaults', 'guests_only'): - guests_only = config.get('defaults', 'guests_only') - else: - guests_only = True - - if not guests_only: - if config.has_option('defaults','hw_group'): - hw_group = config.get('defaults','hw_group') + if not self.guests_only: + if self.config.has_option('defaults', 'hw_group'): + hw_group = self.config.get('defaults', 'hw_group') else: hw_group = default_group + '_hw' - inv[hw_group] = [] - if config.has_option('defaults','vm_group'): - vm_group = config.get('defaults','vm_group') + if self.config.has_option('defaults', 'vm_group'): + vm_group = self.config.get('defaults', 'vm_group') else: vm_group = default_group + '_vm' - inv[vm_group] = [] # Loop through physical hosts: - hosts = HostSystem.all(client) - for host in hosts: - if not guests_only: - inv['all']['hosts'].append(host.name) - inv[hw_group].append(host.name) - inv['_meta']['hostvars'][host.name] = get_host_info(host) - save_cache(vm.name, inv['_meta']['hostvars'][host.name], config) + for host in HostSystem.all(self.client): + if not self.guests_only: + self._add_host(inv, 'all', host.name) + self._add_host(inv, hw_group, host.name) + host_info = self._get_host_info(host) + if meta_hostvars: + inv['_meta']['hostvars'][host.name] = host_info + self._put_cache(host.name, host_info) + + # Loop through all VMs on physical host. for vm in host.vm: - inv['all']['hosts'].append(vm.name) - inv[vm_group].append(vm.name) - inv['_meta']['hostvars'][vm.name] = get_host_info(vm) - save_cache(vm.name, inv['_meta']['hostvars'][vm.name], config) + self._add_host(inv, 'all', vm.name) + self._add_host(inv, vm_group, vm.name) + vm_info = self._get_vm_info(vm) + if meta_hostvars: + inv['_meta']['hostvars'][vm.name] = vm_info + self._put_cache(vm.name, vm_info) - save_cache('inventory', inv, config) + # Group by resource pool. + vm_resourcePool = vm_info.get('vmware_resourcePool', None) + if vm_resourcePool: + self._add_child(inv, vm_group, 'resource_pools') + self._add_child(inv, 'resource_pools', vm_resourcePool) + self._add_host(inv, vm_resourcePool, vm.name) - return json.dumps(inv) + # Group by datastore. + for vm_datastore in vm_info.get('vmware_datastores', []): + self._add_child(inv, vm_group, 'datastores') + self._add_child(inv, 'datastores', vm_datastore) + self._add_host(inv, vm_datastore, vm.name) -def get_single_host(client, config, hostname): + # Group by network. 
+ for vm_network in vm_info.get('vmware_networks', []): + self._add_child(inv, vm_group, 'networks') + self._add_child(inv, 'networks', vm_network) + self._add_host(inv, vm_network, vm.name) - inv = {} - if cache_available(hostname, config): - inv = get_cache(hostname,config) - elif client: - hosts = HostSystem.all(client) #TODO: figure out single host getter - for host in hosts: - if hostname == host.name: - inv = get_host_info(host) - break - for vm in host.vm: - if hostname == vm.name: - inv = get_host_info(vm) - break - save_cache(hostname,inv,config) + # Group by guest OS. + vm_guestId = vm_info.get('vmware_guestId', None) + if vm_guestId: + self._add_child(inv, vm_group, 'guests') + self._add_child(inv, 'guests', vm_guestId) + self._add_host(inv, vm_guestId, vm.name) + + # Group all VM templates. + vm_template = vm_info.get('vmware_template', False) + if vm_template: + self._add_child(inv, vm_group, 'templates') + self._add_host(inv, 'templates', vm.name) + + self._put_cache(cache_name, inv) + return inv + + def get_host(self, hostname): + ''' + Read info about a specific host or VM from cache or VMware API. + ''' + inv = self._get_cache(hostname, None) + if inv is not None: + return inv + + if not self.guests_only: + try: + host = HostSystem.get(self.client, name=hostname) + inv = self._get_host_info(host) + except ObjectNotFoundError: + pass + + if inv is None: + try: + vm = VirtualMachine.get(self.client, name=hostname) + inv = self._get_vm_info(vm) + except ObjectNotFoundError: + pass + + if inv is not None: + self._put_cache(hostname, inv) + return inv or {} + + +def main(): + parser = optparse.OptionParser() + parser.add_option('--list', action='store_true', dest='list', + default=False, help='Output inventory groups and hosts') + parser.add_option('--host', dest='host', default=None, metavar='HOST', + help='Output variables only for the given hostname') + # Additional options for use when running the script standalone, but never + # used by Ansible. 
+ parser.add_option('--pretty', action='store_true', dest='pretty', + default=False, help='Output nicely-formatted JSON') + parser.add_option('--include-host-systems', action='store_true', + dest='include_host_systems', default=False, + help='Include host systems in addition to VMs') + parser.add_option('--no-meta-hostvars', action='store_false', + dest='meta_hostvars', default=True, + help='Exclude [\'_meta\'][\'hostvars\'] with --list') + options, args = parser.parse_args() + + if options.include_host_systems: + vmware_inventory = VMwareInventory(guests_only=False) + else: + vmware_inventory = VMwareInventory() + if options.host is not None: + inventory = vmware_inventory.get_host(options.host) + else: + inventory = vmware_inventory.get_inventory(options.meta_hostvars) + + json_kwargs = {} + if options.pretty: + json_kwargs.update({'indent': 4, 'sort_keys': True}) + json.dump(inventory, sys.stdout, **json_kwargs) - return json.dumps(inv) if __name__ == '__main__': - - inventory = {} - hostname = None - - if len(sys.argv) > 1: - if sys.argv[1] == "--host": - hostname = sys.argv[2] - - # Read config - config = ConfigParser.SafeConfigParser() - me = os.path.abspath(sys.argv[0]).rstrip('.py') - for configfilename in [me + '.ini', 'vmware.ini']: - if os.path.exists(configfilename): - config.read(configfilename) - break - - mename = os.path.basename(me).upper() - host = os.getenv('VMWARE_' + mename + '_HOST',os.getenv('VMWARE_HOST', config.get('auth','host'))) - user = os.getenv('VMWARE_' + mename + '_USER', os.getenv('VMWARE_USER', config.get('auth','user'))) - password = os.getenv('VMWARE_' + mename + '_PASSWORD',os.getenv('VMWARE_PASSWORD', config.get('auth','password'))) - - try: - client = Client( host,user,password ) - except Exception, e: - client = None - #print >> STDERR "Unable to login (only cache available): %s", str(e) - - # Actually do the work - if hostname is None: - inventory = get_inventory(client, config) - else: - inventory = get_single_host(client, config, hostname) - - # Return to ansible - print inventory + main() diff --git a/plugins/inventory/zabbix.py b/plugins/inventory/zabbix.py index 68cc5cc57b3..2bc1e2e1ccc 100755 --- a/plugins/inventory/zabbix.py +++ b/plugins/inventory/zabbix.py @@ -20,7 +20,7 @@ ###################################################################### """ -Zabbix Server external inventory script. +Zabbix Server external inventory script. ======================================== Returns hosts and hostgroups from Zabbix Server. @@ -31,7 +31,6 @@ Tested with Zabbix Server 2.0.6. 
""" import os, sys -import json import argparse import ConfigParser @@ -55,7 +54,7 @@ class ZabbixInventory(object): if config.has_option('zabbix', 'server'): self.zabbix_server = config.get('zabbix', 'server') - # login + # login if config.has_option('zabbix', 'username'): self.zabbix_username = config.get('zabbix', 'username') if config.has_option('zabbix', 'password'): @@ -84,7 +83,7 @@ class ZabbixInventory(object): for host in hostsData: hostname = host['name'] - data[self.defaultgroup]['hosts'].append(hostname) + data[self.defaultgroup]['hosts'].append(hostname) for group in host['groups']: groupname = group['name'] diff --git a/setup.py b/setup.py index de4b251feec..e855ea3bfaf 100644 --- a/setup.py +++ b/setup.py @@ -7,26 +7,13 @@ from glob import glob sys.path.insert(0, os.path.abspath('lib')) from ansible import __version__, __author__ try: - from setuptools import setup + from setuptools import setup, find_packages except ImportError: - print "Ansible now needs setuptools in order to build. " \ - "Install it using your package manager (usually python-setuptools) or via pip (pip install setuptools)." + print("Ansible now needs setuptools in order to build. Install it using" + " your package manager (usually python-setuptools) or via pip (pip" + " install setuptools).") sys.exit(1) -# find library modules -from ansible.constants import DEFAULT_MODULE_PATH -module_paths = DEFAULT_MODULE_PATH.split(os.pathsep) -# always install in /usr/share/ansible if specified -# otherwise use the first module path listed -if '/usr/share/ansible' in module_paths: - install_path = '/usr/share/ansible' -else: - install_path = module_paths[0] -dirs=os.listdir("./library/") -data_files = [] -for i in dirs: - data_files.append((os.path.join(install_path, i), glob('./library/' + i + '/*'))) - setup(name='ansible', version=__version__, description='Radically simple IT automation', @@ -36,25 +23,9 @@ setup(name='ansible', license='GPLv3', install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6'], package_dir={ 'ansible': 'lib/ansible' }, - packages=[ - 'ansible', - 'ansible.cache', - 'ansible.utils', - 'ansible.utils.module_docs_fragments', - 'ansible.inventory', - 'ansible.inventory.vars_plugins', - 'ansible.playbook', - 'ansible.runner', - 'ansible.runner.action_plugins', - 'ansible.runner.lookup_plugins', - 'ansible.runner.connection_plugins', - 'ansible.runner.shell_plugins', - 'ansible.runner.filter_plugins', - 'ansible.callback_plugins', - 'ansible.module_utils' - ], + packages=find_packages('lib'), package_data={ - '': ['module_utils/*.ps1'], + '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1'], }, scripts=[ 'bin/ansible', @@ -64,5 +35,5 @@ setup(name='ansible', 'bin/ansible-galaxy', 'bin/ansible-vault', ], - data_files=data_files + data_files=[], ) diff --git a/test/README.md b/test/README.md index 3e746062cd1..bb3f229d1f1 100644 --- a/test/README.md +++ b/test/README.md @@ -12,7 +12,7 @@ mock interfaces rather than producing side effects. Playbook engine code is better suited for integration tests. 
-Requirements: sudo pip install paramiko PyYAML jinja2 httplib2 passlib +Requirements: sudo pip install paramiko PyYAML jinja2 httplib2 passlib nose mock integration ----------- diff --git a/test/integration/Makefile b/test/integration/Makefile index 4bccc8cd9f1..c708e08e402 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -15,24 +15,32 @@ CREDENTIALS_ARG = endif # http://unix.stackexchange.com/questions/30091/fix-or-alternative-for-mktemp-in-os-x -TMPDIR = $(shell mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') +MYTMPDIR = $(shell mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') VAULT_PASSWORD_FILE = vault-password -all: non_destructive destructive includes unicode test_var_precedence check_mode test_hash test_handlers test_group_by test_vault parsing +CONSUL_RUNNING := $(shell python consul_running.py) + +all: parsing test_var_precedence unicode non_destructive destructive includes check_mode test_hash test_handlers test_group_by test_vault parsing: - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags common,scenario1; [ $$? -eq 3 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags common,scenario2; [ $$? -eq 3 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags common,scenario3; [ $$? -eq 3 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags common,scenario4; [ $$? -eq 3 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario1; [ $$? -eq 3 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario2; [ $$? -eq 3 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario3; [ $$? -eq 3 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario4; [ $$? -eq 3 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5; [ $$? 
-eq 3 ] ansible-playbook good_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) includes: - ansible-playbook test_includes.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) + ansible-playbook test_includes.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) unicode: - ansible-playbook unicode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v $(TEST_FLAGS) + ansible-playbook unicode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v $(TEST_FLAGS) -e 'extra_var=café' + # Test the start-at-task flag #9571 + ansible-playbook unicode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v --start-at-task '*¶' -e 'start_at_task=True' $(TEST_FLAGS) + +mine: + ansible-playbook mine.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) non_destructive: ansible-playbook non_destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) @@ -54,7 +62,7 @@ test_hash: ANSIBLE_HASH_BEHAVIOUR=merge ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' test_var_precedence: - ansible-playbook test_var_precedence.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e 'extra_var=extra_var' + ansible-playbook test_var_precedence.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e 'extra_var=extra_var' -e 'extra_var_override=extra_var_override' test_vault: ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) --list-tasks @@ -62,6 +70,15 @@ test_vault: ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) --syntax-check ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) +# test_delegate_to does not work unless we have permission to ssh to localhost. +# Would take some more effort on our test systems to implement that -- probably +# the test node should create an ssh public-private key pair that allows the +# root user on a node to ssh to itself. Until then, this is not in make all. +# Have to run it manually. Ordinary users should be able to run this test as +# long as they have permissions to login to their local machine via ssh. +test_delegate_to: + ansible-playbook test_delegate_to.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) + test_winrm: ansible-playbook test_winrm.yml -i inventory.winrm -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) @@ -79,8 +96,7 @@ gce_cleanup: python cleanup_gce.py -y --match="^$(CLOUD_RESOURCE_PREFIX)" rackspace_cleanup: - @echo "FIXME - cleanup_rax.py not yet implemented" - @# python cleanup_rax.py -y --match="^$(CLOUD_RESOURCE_PREFIX)" + python cleanup_rax.py -y --match="^$(CLOUD_RESOURCE_PREFIX)" $(CREDENTIALS_FILE): @echo "No credentials file found. A file named '$(CREDENTIALS_FILE)' is needed to provide credentials needed to run cloud tests. See sample 'credentials.template' file." @@ -105,10 +121,20 @@ rackspace: $(CREDENTIALS_FILE) CLOUD_RESOURCE_PREFIX="$(CLOUD_RESOURCE_PREFIX)" make rackspace_cleanup ; \ exit $$RC; +$(CONSUL_RUNNING): + +consul: +ifeq ($(CONSUL_RUNNING), True) + ansible-playbook -i $(INVENTORY) consul.yml ; \ + ansible-playbook -i ../../plugins/inventory/consul_io.py consul_inventory.yml +else + @echo "Consul agent is not running locally. 
test_galaxy: test_galaxy_spec test_galaxy_yaml test_galaxy_spec: - mytmpdir=$(TMPDIR) ; \ + mytmpdir=$(MYTMPDIR) ; \ ansible-galaxy install -r galaxy_rolesfile -p $$mytmpdir/roles ; \ cp galaxy_playbook.yml $$mytmpdir ; \ ansible-playbook -i $(INVENTORY) $$mytmpdir/galaxy_playbook.yml -v $(TEST_FLAGS) ; \ @@ -117,7 +143,7 @@ test_galaxy_spec: exit $$RC test_galaxy_yaml: - mytmpdir=$(TMPDIR) ; \ + mytmpdir=$(MYTMPDIR) ; \ ansible-galaxy install -r galaxy_roles.yml -p $$mytmpdir/roles ; \ cp galaxy_playbook.yml $$mytmpdir ; \ ansible-playbook -i $(INVENTORY) $$mytmpdir/galaxy_playbook.yml -v $(TEST_FLAGS) ; \ diff --git a/test/integration/cleanup_rax.py b/test/integration/cleanup_rax.py new file mode 100644 index 00000000000..95f8ba2f0ae --- /dev/null +++ b/test/integration/cleanup_rax.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python + +import os +import re +import yaml +import argparse + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + + +def rax_list_iterator(svc, *args, **kwargs): + method = kwargs.pop('method', 'list') + items = getattr(svc, method)(*args, **kwargs) + while items: + retrieved = getattr(svc, method)(*args, marker=items[-1].id, **kwargs) + if items and retrieved and items[-1].id == retrieved[0].id: + del items[-1] + items.extend(retrieved) + if len(retrieved) < 2: + break + return items + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('-y', '--yes', action='store_true', dest='assumeyes', + default=False, help="Don't prompt for confirmation") + parser.add_argument('--match', dest='match_re', + default='^ansible-testing', + help='Regular expression used to find resources ' '(default: %(default)s)') + + return parser.parse_args() + + +def authenticate(): + try: + with open(os.path.realpath('./credentials.yml')) as f: + credentials = yaml.load(f) + except Exception as e: + raise SystemExit(e) + + try: + pyrax.set_credentials(credentials.get('rackspace_username'), + credentials.get('rackspace_api_key')) + except Exception as e: + raise SystemExit(e) + + +def prompt_and_delete(item, prompt, assumeyes): + if not assumeyes: + assumeyes = raw_input(prompt).lower() == 'y' + assert hasattr(item, 'delete') or hasattr(item, 'terminate'), \ + "Class <%s> has no delete or terminate attribute" % item.__class__ + if assumeyes: + if hasattr(item, 'delete'): + item.delete() + print ("Deleted %s" % item) + if hasattr(item, 'terminate'): + item.terminate() + print ("Terminated %s" % item) + + +def delete_rax(args): + """Function for deleting CloudServers""" + print ("--- Cleaning CloudServers matching '%s'" % args.match_re) + search_opts = dict(name='^%s' % args.match_re) + for region in pyrax.identity.services.compute.regions: + cs = pyrax.connect_to_cloudservers(region=region) + servers = rax_list_iterator(cs.servers, search_opts=search_opts) + for server in servers: + prompt_and_delete(server, + 'Delete matching %s? [y/n]: ' % server, + args.assumeyes) + + +def delete_rax_clb(args): + """Function for deleting Cloud Load Balancers""" + print ("--- Cleaning Cloud Load Balancers matching '%s'" % args.match_re) + for region in pyrax.identity.services.load_balancer.regions: + clb = pyrax.connect_to_cloud_loadbalancers(region=region) + for lb in rax_list_iterator(clb): + if re.search(args.match_re, lb.name): + prompt_and_delete(lb, + 'Delete matching %s?
[y/n]: ' % lb, + args.assumeyes) + + +def delete_rax_keypair(args): + """Function for deleting Rackspace Key pairs""" + print ("--- Cleaning Key Pairs matching '%s'" % args.match_re) + for region in pyrax.identity.services.compute.regions: + cs = pyrax.connect_to_cloudservers(region=region) + for keypair in cs.keypairs.list(): + if re.search(args.match_re, keypair.name): + prompt_and_delete(keypair, + 'Delete matching %s? [y/n]: ' % keypair, + args.assumeyes) + + +def delete_rax_network(args): + """Function for deleting Cloud Networks""" + print ("--- Cleaning Cloud Networks matching '%s'" % args.match_re) + for region in pyrax.identity.services.network.regions: + cnw = pyrax.connect_to_cloud_networks(region=region) + for network in cnw.list(): + if re.search(args.match_re, network.name): + prompt_and_delete(network, + 'Delete matching %s? [y/n]: ' % network, + args.assumeyes) + + +def delete_rax_cbs(args): + """Function for deleting Cloud Block Storage volumes""" + print ("--- Cleaning Cloud Block Storage matching '%s'" % args.match_re) + for region in pyrax.identity.services.network.regions: + cbs = pyrax.connect_to_cloud_blockstorage(region=region) + for volume in cbs.list(): + if re.search(args.match_re, volume.name): + prompt_and_delete(volume, + 'Delete matching %s? [y/n]: ' % volume, + args.assumeyes) + + +def delete_rax_cdb(args): + """Function for deleting Cloud Databases""" + print ("--- Cleaning Cloud Databases matching '%s'" % args.match_re) + for region in pyrax.identity.services.database.regions: + cdb = pyrax.connect_to_cloud_databases(region=region) + for db in rax_list_iterator(cdb): + if re.search(args.match_re, db.name): + prompt_and_delete(db, + 'Delete matching %s? [y/n]: ' % db, + args.assumeyes) + + +def main(): + if not HAS_PYRAX: + raise SystemExit('The pyrax python module is required for this script') + + args = parse_args() + authenticate() + + funcs = [f for n, f in globals().items() if n.startswith('delete_rax')] + for func in sorted(funcs, key=lambda f: f.__name__): + try: + func(args) + except Exception as e: + print ("---- %s failed (%s)" % (func.__name__, e.message)) + + +if __name__ == '__main__': + try: + main() + except KeyboardInterrupt: + print ('\nExiting...')
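The marker-based paging in rax_list_iterator above is worth a second look: each call re-fetches starting from the id of the last item seen, and the first item of the next page is dropped when it duplicates the marker item. A self-contained sketch of the same loop against a stubbed service (FakeServers and its two-item pages are invented for illustration; no pyrax or cloud account involved):

    class FakeItem(object):
        def __init__(self, id_):
            self.id = id_

    class FakeServers(object):
        """Stub service: pages of two items, first page item repeats the marker."""
        def __init__(self, count):
            self._items = [FakeItem(i) for i in range(count)]

        def list(self, marker=None, **kwargs):
            start = 0 if marker is None else marker  # ids double as indexes here
            return self._items[start:start + 2]

    def rax_list_iterator(svc, *args, **kwargs):
        method = kwargs.pop('method', 'list')
        items = getattr(svc, method)(*args, **kwargs)
        while items:
            retrieved = getattr(svc, method)(*args, marker=items[-1].id, **kwargs)
            if items and retrieved and items[-1].id == retrieved[0].id:
                del items[-1]  # drop the duplicated marker item
            items.extend(retrieved)
            if len(retrieved) < 2:
                break
        return items

    print([item.id for item in rax_list_iterator(FakeServers(5))])  # [0, 1, 2, 3, 4]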
diff --git a/test/integration/consul.yml b/test/integration/consul.yml new file mode 100644 index 00000000000..c85e703e87c --- /dev/null +++ b/test/integration/consul.yml @@ -0,0 +1,82 @@ +- hosts: localhost + connection: local + gather_facts: false + + vars: + # these are the defaults from the consul-vagrant cluster setup + - mgmt_token: '4791402A-D875-4C18-8316-E652DBA53B18' + - acl_host: '11.0.0.2' + - metadata_json: '{"clearance": "top_secret"}' + + pre_tasks: + # this works except for the KV lookups + - name: check that the consul agent is running locally + local_action: wait_for port=8500 timeout=5 + ignore_errors: true + register: consul_running + + roles: + + - {role: test_consul_service, + when: not consul_running.failed is defined} + + - {role: test_consul_kv, + when: not consul_running.failed is defined} + + - {role: test_consul_acl, + when: not consul_running.failed is defined} + + - {role: test_consul_session, + when: not consul_running.failed is defined} + + tasks: + + - name: setup services with passing check for consul inventory test + consul: + service_name: nginx + service_port: 80 + script: "sh -c true" + interval: 5 + token: '4791402A-D875-4C18-8316-E652DBA53B18' + tags: + - dev + - master + + + - name: setup failing service for inventory test + consul: + service_name: nginx + service_port: 443 + script: "sh -c false" + interval: 5 + tags: + - qa + - slave + + - name: setup ssh service for inventory test + consul: + service_name: ssh + service_port: 2222 + script: "sh -c true" + interval: 5 + token: '4791402A-D875-4C18-8316-E652DBA53B18' + + - name: update the Anonymous token to allow anon access to kv store + consul_acl: + mgmt_token: '{{mgmt_token}}' + host: '{{acl_host}}' + token: 'anonymous' + rules: + - key: '' + policy: write + register: inventory_token + + - name: add metadata for the node through kv_store + consul_kv: "key=ansible/metadata/dc1/consul-1 value='{{metadata_json}}'" + + - name: add groups for the node through kv_store + consul_kv: key=ansible/groups/dc1/consul-1 value='a_group, another_group' + + - name: warn that tests are ignored if consul agent is not running + debug: msg="A consul agent needs to be running in order to run the tests. To set up a vagrant cluster for use in testing see http://github.com/sgargan/consul-vagrant" + when: consul_running.failed is defined diff --git a/test/integration/consul_inventory.yml b/test/integration/consul_inventory.yml new file mode 100644 index 00000000000..0007a0965d4 --- /dev/null +++ b/test/integration/consul_inventory.yml @@ -0,0 +1,19 @@ +- hosts: all;!localhost + gather_facts: false + + pre_tasks: + - name: check that the consul agent is running locally + local_action: wait_for port=8500 timeout=5 + ignore_errors: true + register: consul_running + + roles: + + - {role: test_consul_inventory, + when: not consul_running.failed is defined} + + tasks: + + - name: warn that tests are ignored if consul agent is not running + debug: msg="A consul agent needs to be running in order to run the tests. To set up a vagrant cluster for use in testing see http://github.com/sgargan/consul-vagrant" + when: consul_running.failed is defined diff --git a/test/integration/consul_running.py b/test/integration/consul_running.py new file mode 100644 index 00000000000..9fdff9ef596 --- /dev/null +++ b/test/integration/consul_running.py @@ -0,0 +1,11 @@ +''' Checks that the consul agent is running locally. ''' + +if __name__ == '__main__': + + try: + import consul + consul = consul.Consul(host='0.0.0.0', port=8500) + consul.catalog.nodes() + print "True" + except: + # any failure (missing library, unreachable agent) leaves stdout empty + pass diff --git a/test/integration/credentials.template index 12316254bbd..4894f5827b3 100644 --- a/test/integration/credentials.template +++ b/test/integration/credentials.template @@ -1,4 +1,9 @@ --- +# Rackspace Credentials +rackspace_username: +rackspace_api_key: +rackspace_region: + # AWS Credentials ec2_access_key: ec2_secret_key: diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml index d341c4916b7..47203194821 100644 --- a/test/integration/destructive.yml +++ b/test/integration/destructive.yml @@ -1,13 +1,17 @@ - hosts: testhost gather_facts: True roles: + # In destructive because it creates and removes a user + - { role: test_sudo, tags: test_sudo} - { role: test_service, tags: test_service } - - { role: test_pip, tags: test_pip } + # Current pip unconditionally uses md5.
We can re-enable if pip switches + # to a different hash or allows us to not check md5 + - { role: test_pip, tags: test_pip, when: ansible_fips != True } - { role: test_gem, tags: test_gem } - { role: test_yum, tags: test_yum } - { role: test_apt, tags: test_apt } - { role: test_apt_repository, tags: test_apt_repository } + - { role: test_postgresql, tags: test_postgresql} - { role: test_mysql_db, tags: test_mysql_db} - { role: test_mysql_user, tags: test_mysql_user} - - { role: test_mysql_user, tags: test_mysql_user} - { role: test_mysql_variables, tags: test_mysql_variables} diff --git a/test/integration/galaxy_roles.yml b/test/integration/galaxy_roles.yml index cd610068401..76b385191cd 100644 --- a/test/integration/galaxy_roles.yml +++ b/test/integration/galaxy_roles.yml @@ -6,3 +6,8 @@ - src: https://bitbucket.org/willthames/http-ansible-galaxy/get/master.tar.gz name: http-role + +- src: git@github.com:geerlingguy/ansible-role-php.git + scm: git + name: php + diff --git a/test/integration/galaxy_rolesfile b/test/integration/galaxy_rolesfile index a1374925ba7..31596d4914a 100644 --- a/test/integration/galaxy_rolesfile +++ b/test/integration/galaxy_rolesfile @@ -1,3 +1,4 @@ git+http://bitbucket.org/willthames/git-ansible-galaxy,v1.4 hg+http://bitbucket.org/willthames/hg-ansible-galaxy https://bitbucket.org/willthames/http-ansible-galaxy/get/master.tar.gz,,http-role +git+git@github.com:geerlingguy/ansible-role-php.git diff --git a/test/integration/integration_config.yml b/test/integration/integration_config.yml index 4c2fb2a0a50..bf5d6db3de6 100644 --- a/test/integration/integration_config.yml +++ b/test/integration/integration_config.yml @@ -1,4 +1,5 @@ --- +win_output_dir: 'C:/temp/' output_dir: ~/ansible_testing non_root_test_user: ansible pip_test_package: epdb diff --git a/test/integration/inventory b/test/integration/inventory index 59bb395205e..72d80aabebd 100644 --- a/test/integration/inventory +++ b/test/integration/inventory @@ -1,6 +1,9 @@ [local] testhost ansible_ssh_host=127.0.0.1 ansible_connection=local testhost2 ansible_ssh_host=127.0.0.1 ansible_connection=local +# For testing delegate_to +testhost3 ansible_ssh_host=127.0.0.3 +testhost4 ansible_ssh_host=127.0.0.4 # the following inline declarations are accompanied # by (preferred) group_vars/ and host_vars/ variables @@ -9,6 +12,10 @@ testhost2 ansible_ssh_host=127.0.0.1 ansible_connection=local [inven_overridehosts] invenoverride ansible_ssh_host=127.0.0.1 ansible_connection=local +[all:vars] +extra_var_override=FROM_INVENTORY +inven_var=inventory_var + [inven_overridehosts:vars] foo=foo var_dir=vars diff --git a/test/integration/non_destructive.yml b/test/integration/non_destructive.yml index 619396acb25..e520a17ea05 100644 --- a/test/integration/non_destructive.yml +++ b/test/integration/non_destructive.yml @@ -38,3 +38,7 @@ - { role: test_script, tags: test_script } - { role: test_authorized_key, tags: test_authorized_key } - { role: test_get_url, tags: test_get_url } + - { role: test_embedded_module, tags: test_embedded_module } + # Turn on test_binary when we start testing v2 + #- { role: test_binary, tags: test_binary } + diff --git a/test/integration/rackspace.yml b/test/integration/rackspace.yml index a6ba60c13e4..37f9b097b9c 100644 --- a/test/integration/rackspace.yml +++ b/test/integration/rackspace.yml @@ -1,4 +1,42 @@ -- hosts: testhost - gather_facts: True - roles: [] +--- +- hosts: localhost + connection: local + gather_facts: false + tags: + - rackspace + roles: + - role: test_rax + tags: test_rax + - 
role: test_rax_facts + tags: test_rax_facts + + - role: test_rax_meta + tags: test_rax_meta + + - role: test_rax_keypair + tags: test_rax_keypair + + - role: test_rax_clb + tags: test_rax_clb + + - role: test_rax_clb_nodes + tags: test_rax_clb_nodes + + - role: test_rax_network + tags: test_rax_network + + - role: test_rax_cbs + tags: test_rax_cbs + + - role: test_rax_cbs_attachments + tags: test_rax_cbs_attachments + + - role: test_rax_identity + tags: test_rax_identity + + - role: test_rax_cdb + tags: test_rax_cdb + + - role: test_rax_cdb_database + tags: test_rax_cdb_database diff --git a/test/integration/roles/prepare_rax_tests/defaults/main.yml b/test/integration/roles/prepare_rax_tests/defaults/main.yml new file mode 100644 index 00000000000..ffa72294b8c --- /dev/null +++ b/test/integration/roles/prepare_rax_tests/defaults/main.yml @@ -0,0 +1,10 @@ +--- +rackspace_region: IAD +rackspace_image_human_id: "ubuntu-1404-lts-trusty-tahr-pvhvm" +rackspace_image_id: "753a7703-4960-488b-aab4-a3cdd4b276dc" +rackspace_image_name: "Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)" +rackspace_flavor: "performance1-1" + +rackspace_keypair_pub: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDymofzvt86DUA6XSSxc7eDHwUNvcOSmUWjB76jFvhYc6PbS5QmTzBtCka1ORdaW0Z2i3EjfFvzA8WvuY3qP/FpIVDL25ZqZHgxSfGN5pbJ2tAeXK165kNPXBuuISrMhmdLFbRZNn6PwKHEmtrtfEQ3w6ay9+MhqlEr0OX2r6bCXLj+f50QnQXamU6Fm4IpkTsb60osvHNi569Dd8cADEv92oLZpNMa8/MPGnlipjauhzNtEDTUeZwtrAQUXe6CzJ0QmIlyKDglDZLuAKU/VRumo1FRsn4AwJnVsbP2CHBPkbNoYt6LhQiQqXypEIWGmIln0dlO6gZTr3dYC4BVGREl" + +resource_prefix: ansible-testing diff --git a/test/integration/roles/prepare_win_tests/tasks/main.yml b/test/integration/roles/prepare_win_tests/tasks/main.yml new file mode 100644 index 00000000000..756c977fb19 --- /dev/null +++ b/test/integration/roles/prepare_win_tests/tasks/main.yml @@ -0,0 +1,30 @@ +# test code for the windows versions of copy, file and template module +# originally +# (c) 2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +- name: clean out the test directory + win_file: name={{win_output_dir|mandatory}} state=absent + tags: + - prepare + +- name: create the test directory + win_file: name={{win_output_dir}} state=directory + tags: + - prepare + diff --git a/test/integration/roles/setup_postgresql_db/defaults/main.yml b/test/integration/roles/setup_postgresql_db/defaults/main.yml new file mode 100644 index 00000000000..08f3a91b46e --- /dev/null +++ b/test/integration/roles/setup_postgresql_db/defaults/main.yml @@ -0,0 +1,5 @@ +postgresql_service: postgresql + +postgresql_packages: + - postgresql-server + - python-psycopg2 diff --git a/test/integration/roles/setup_postgresql_db/files/pg_hba.conf b/test/integration/roles/setup_postgresql_db/files/pg_hba.conf new file mode 100644 index 00000000000..a8defb8ee6c --- /dev/null +++ b/test/integration/roles/setup_postgresql_db/files/pg_hba.conf @@ -0,0 +1,10 @@ +# !!! This file managed by Ansible. Any local changes may be overwritten. !!! + +# Database administrative login by UNIX sockets +# note: you may wish to restrict this further later +local all postgres trust + +# TYPE DATABASE USER CIDR-ADDRESS METHOD +local all all md5 +host all all 127.0.0.1/32 md5 +host all all ::1/128 md5
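A side note on that pg_hba.conf: the `local all postgres trust` line is what lets the test tasks reach PostgreSQL over the UNIX socket without a password. A hypothetical smoke check for that setup, using the python-psycopg2 package from the list above (not part of this patch):

    import psycopg2

    # 'trust' for local connections as the postgres role means no password
    # is required over the default UNIX socket.
    conn = psycopg2.connect(database='postgres', user='postgres')
    cur = conn.cursor()
    cur.execute('SELECT version()')
    print(cur.fetchone()[0])
    conn.close()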
diff --git a/test/integration/roles/setup_postgresql_db/tasks/main.yml b/test/integration/roles/setup_postgresql_db/tasks/main.yml new file mode 100644 index 00000000000..fbcc9cab725 --- /dev/null +++ b/test/integration/roles/setup_postgresql_db/tasks/main.yml @@ -0,0 +1,81 @@ +- include_vars: '{{ item }}' + with_first_found: + - files: + - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml' + - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml' + - '{{ ansible_os_family }}.yml' + - 'default.yml' + paths: '../vars' + +# Make sure we start fresh +- name: remove rpm dependencies for postgresql test + yum: name={{ item }} state=absent + with_items: postgresql_packages + when: ansible_pkg_mgr == 'yum' + +- name: remove dpkg dependencies for postgresql test + apt: name={{ item }} state=absent + with_items: postgresql_packages + when: ansible_pkg_mgr == 'apt' + +- name: remove old db (red hat) + command: rm -rf "{{ pg_dir }}" + ignore_errors: True + when: ansible_os_family == "RedHat" + +# Theoretically, pg_dropcluster should work, but it doesn't, so rm the files +- name: remove old db config (debian) + command: rm -rf /etc/postgresql + ignore_errors: True + when: ansible_os_family == "Debian" + +- name: remove old db files (debian) + command: rm -rf /var/lib/postgresql + ignore_errors: True + when: ansible_os_family == "Debian" + +- name: install rpm dependencies for postgresql test + yum: name={{ item }} state=latest + with_items: postgresql_packages + when: ansible_pkg_mgr == 'yum' + +- name: install dpkg dependencies for postgresql test + apt: name={{ item }} state=latest + with_items: postgresql_packages + when: ansible_pkg_mgr == 'apt' + +- name: Initialize postgres (systemd) + command: postgresql-setup initdb + when: ansible_distribution == "Fedora" or (ansible_os_family == "RedHat" and ansible_distribution_major_version|int >= 7) + +- name: Initialize postgres (sysv) + command: /sbin/service postgresql initdb + when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int <= 6 + +- name: Initialize postgres (upstart) + command: /usr/bin/pg_createcluster {{ pg_ver }} main + # Sometimes package install creates the db cluster, sometimes this step is needed + ignore_errors: True + when: ansible_os_family == 'Debian' + +- name: Copy pg_hba into place + copy: src=pg_hba.conf dest="{{ pg_hba_location }}" owner="postgres" group="root" mode="0644" + +- name: Generate pt_BR locale (Debian) + command: locale-gen pt_BR + when: ansible_os_family == 'Debian' + +- name: Generate es_MX locale (Debian) + command: locale-gen es_MX + when: ansible_os_family == 'Debian' + +- name: Generate pt_BR locale (Red Hat) + command: localedef -f ISO-8859-1 -i pt_BR pt_BR + when: ansible_os_family == 'RedHat' + +- name: Generate es_MX locale (Red Hat) + command: localedef -f ISO-8859-1 -i es_MX es_MX + when: ansible_os_family == 'RedHat' + +- name: restart postgresql service + service: name={{ postgresql_service }} state=restarted diff --git a/test/integration/roles/setup_postgresql_db/vars/Ubuntu-12.yml b/test/integration/roles/setup_postgresql_db/vars/Ubuntu-12.yml new file mode 100644 index 00000000000..b2507c98496 --- /dev/null +++
b/test/integration/roles/setup_postgresql_db/vars/Ubuntu-12.yml @@ -0,0 +1,11 @@ +postgresql_service: "postgresql" + +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python-psycopg2" + +pg_hba_location: "/etc/postgresql/9.1/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/9.1/main" +pg_ver: 9.1 + diff --git a/test/integration/roles/setup_postgresql_db/vars/Ubuntu-14.yml b/test/integration/roles/setup_postgresql_db/vars/Ubuntu-14.yml new file mode 100644 index 00000000000..7d704264da7 --- /dev/null +++ b/test/integration/roles/setup_postgresql_db/vars/Ubuntu-14.yml @@ -0,0 +1,10 @@ +postgresql_service: "postgresql" + +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python-psycopg2" + +pg_hba_location: "/etc/postgresql/9.3/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/9.3/main" +pg_ver: 9.3 diff --git a/test/integration/roles/setup_postgresql_db/vars/default.yml b/test/integration/roles/setup_postgresql_db/vars/default.yml new file mode 100644 index 00000000000..dc7db0fc981 --- /dev/null +++ b/test/integration/roles/setup_postgresql_db/vars/default.yml @@ -0,0 +1,8 @@ +postgresql_service: "postgresql" + +postgresql_packages: + - "postgresql-server" + - "python-psycopg2" + +pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" +pg_dir: "/var/lib/pgsql/data" diff --git a/test/integration/roles/test_apt/tasks/apt-builddep.yml b/test/integration/roles/test_apt/tasks/apt-builddep.yml new file mode 100644 index 00000000000..e3f9357b12a --- /dev/null +++ b/test/integration/roles/test_apt/tasks/apt-builddep.yml @@ -0,0 +1,55 @@ +# test installing build-deps using netcat and quilt as test victims. +# +# Deps can be discovered like so (taken from ubuntu 12.04) +# ==== +# root@localhost:~ # apt-rdepends --build-depends --follow=DEPENDS netcat +# Reading package lists... Done +# Building dependency tree +# Reading state information... Done +# netcat +# Build-Depends: debhelper (>= 8.0.0) +# Build-Depends: quilt +# root@localhost:~ # +# ==== +# Since many things depend on debhelper, let's just uninstall quilt, then +# install build-dep for netcat to get it back. 
build-dep doesn't have an +# uninstall, so we don't need to test for reverse actions (eg, uninstall +# build-dep and ensure things are clean) + +# uninstall quilt +- name: check quilt with dpkg + shell: dpkg -s quilt + register: dpkg_result + ignore_errors: true + tags: ['test_apt_builddep'] + +- name: uninstall quilt with apt + apt: pkg=quilt state=absent purge=yes + register: apt_result + when: dpkg_result|success + tags: ['test_apt_builddep'] + +# install build-dep for netcat +- name: install netcat build-dep with apt + apt: pkg=netcat state=build-dep + register: apt_result + tags: ['test_apt_builddep'] + +- name: verify build_dep of netcat + assert: + that: + - "'changed' in apt_result" + tags: ['test_apt_builddep'] + +# ensure debhelper and qilt are installed +- name: check build_deps with dpkg + shell: dpkg --get-selections | egrep '(debhelper|quilt)' + failed_when: False + register: dpkg_result + tags: ['test_apt_builddep'] + +- name: verify build_deps are really there + assert: + that: + - "dpkg_result.rc == 0" + tags: ['test_apt_builddep'] diff --git a/test/integration/roles/test_apt/tasks/apt.yml b/test/integration/roles/test_apt/tasks/apt.yml index d1cdeeb1a2f..5457c2ef781 100644 --- a/test/integration/roles/test_apt/tasks/apt.yml +++ b/test/integration/roles/test_apt/tasks/apt.yml @@ -77,4 +77,27 @@ that: - "not apt_result.changed" +# UNINSTALL AGAIN +- name: uninstall hello with apt + apt: pkg=hello state=absent purge=yes + register: apt_result + +# INSTALL WITH VERSION WILDCARD +- name: install hello with apt + apt: name=hello=2.* state=present + register: apt_result + +- name: check hello with wildcard with dpkg + shell: dpkg --get-selections | fgrep hello + failed_when: False + register: dpkg_result + +- debug: var=apt_result +- debug: var=dpkg_result + +- name: verify installation of hello + assert: + that: + - "apt_result.changed" + - "dpkg_result.rc == 0" diff --git a/test/integration/roles/test_apt/tasks/main.yml b/test/integration/roles/test_apt/tasks/main.yml index 4f2215f57ac..8976087371d 100644 --- a/test/integration/roles/test_apt/tasks/main.yml +++ b/test/integration/roles/test_apt/tasks/main.yml @@ -19,3 +19,5 @@ - include: 'apt.yml' when: ansible_distribution in ('Ubuntu', 'Debian') +- include: 'apt-builddep.yml' + when: ansible_distribution in ('Ubuntu', 'Debian') diff --git a/test/integration/roles/test_assemble/tasks/main.yml b/test/integration/roles/test_assemble/tasks/main.yml index f06cee6ace8..d0c1f15e56d 100644 --- a/test/integration/roles/test_assemble/tasks/main.yml +++ b/test/integration/roles/test_assemble/tasks/main.yml @@ -37,7 +37,19 @@ assert: that: - "result.state == 'file'" - - "result.md5sum == '96905702a2ece40de6bf3a94b5062513'" + - "result.changed == True" + - "result.checksum == '048a1bd1951aa5ccc427eeb4ca19aee45e9c68b3'" + +- name: test assemble with all fragments + assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled1" + register: result + +- name: assert that the same assemble made no changes + assert: + that: + - "result.state == 'file'" + - "result.changed == False" + - "result.checksum == '048a1bd1951aa5ccc427eeb4ca19aee45e9c68b3'" - name: test assemble with fragments matching a regex assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled2" regexp="^fragment[1-3]$" @@ -47,7 +59,7 @@ assert: that: - "result.state == 'file'" - - "result.md5sum == 'eb9e3486a9cd6943b5242e573b9b9349'" + - "result.checksum == 'edfe2d7487ef8f5ebc0f1c4dc57ba7b70a7b8e2b'" - name: test assemble with a delimiter assemble: 
src="{{output_dir}}/src" dest="{{output_dir}}/assembled3" delimiter="#--- delimiter ---#" @@ -57,7 +69,7 @@ assert: that: - "result.state == 'file'" - - "result.md5sum == '4773eac67aba3f0be745876331c8a450'" + - "result.checksum == '505359f48c65b3904127cf62b912991d4da7ed6d'" - name: test assemble with remote_src=False assemble: src="./" dest="{{output_dir}}/assembled4" remote_src=no @@ -67,7 +79,7 @@ assert: that: - "result.state == 'file'" - - "result.md5sum == '96905702a2ece40de6bf3a94b5062513'" + - "result.checksum == '048a1bd1951aa5ccc427eeb4ca19aee45e9c68b3'" - name: test assemble with remote_src=False and a delimiter assemble: src="./" dest="{{output_dir}}/assembled5" remote_src=no delimiter="#--- delimiter ---#" @@ -77,5 +89,5 @@ assert: that: - "result.state == 'file'" - - "result.md5sum == '4773eac67aba3f0be745876331c8a450'" + - "result.checksum == '505359f48c65b3904127cf62b912991d4da7ed6d'" diff --git a/test/integration/roles/test_async/tasks/main.yml b/test/integration/roles/test_async/tasks/main.yml index 556284770ac..0b9991ec049 100644 --- a/test/integration/roles/test_async/tasks/main.yml +++ b/test/integration/roles/test_async/tasks/main.yml @@ -62,3 +62,28 @@ async: 15 poll: 0 when: False + +# test async "fire and forget, but check later" + +- name: 'start a task with "fire-and-forget"' + command: sleep 15 + async: 30 + poll: 0 + register: fnf_task + +- name: assert task was successfully started + assert: + that: + - fnf_task.started + - "'ansible_job_id' in fnf_task" + +- name: 'check on task started as a "fire-and-forget"' + async_status: jid={{ fnf_task.ansible_job_id }} + register: fnf_result + until: fnf_result.finished + retries: 30 + +- name: assert task was successfully checked + assert: + that: + - fnf_result.finished diff --git a/test/integration/roles/test_bad_parsing/meta/main.yml b/test/integration/roles/test_bad_parsing/meta/main.yml new file mode 100644 index 00000000000..c845eccfcd9 --- /dev/null +++ b/test/integration/roles/test_bad_parsing/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - { role: prepare_tests } diff --git a/test/integration/roles/test_bad_parsing/tasks/main.yml b/test/integration/roles/test_bad_parsing/tasks/main.yml index fae01f2ee9d..3899821de6f 100644 --- a/test/integration/roles/test_bad_parsing/tasks/main.yml +++ b/test/integration/roles/test_bad_parsing/tasks/main.yml @@ -49,5 +49,7 @@ failed_when: False tags: scenario4 - +- name: test that a missing/malformed jinja2 filter fails + debug: msg="{{output_dir|badfiltername}}" + tags: scenario5 diff --git a/test/integration/roles/test_binary/files/b64_latin1 b/test/integration/roles/test_binary/files/b64_latin1 new file mode 100644 index 00000000000..c7fbdeb6328 --- /dev/null +++ b/test/integration/roles/test_binary/files/b64_latin1 @@ -0,0 +1 @@ +Café Eñe diff --git a/test/integration/roles/test_binary/files/b64_utf8 b/test/integration/roles/test_binary/files/b64_utf8 new file mode 100644 index 00000000000..c7fbdeb6328 --- /dev/null +++ b/test/integration/roles/test_binary/files/b64_utf8 @@ -0,0 +1 @@ +Café Eñe diff --git a/test/integration/roles/test_binary/files/from_playbook b/test/integration/roles/test_binary/files/from_playbook new file mode 100644 index 00000000000..c7fbdeb6328 --- /dev/null +++ b/test/integration/roles/test_binary/files/from_playbook @@ -0,0 +1 @@ +Café Eñe diff --git a/test/integration/roles/test_binary/meta/main.yml b/test/integration/roles/test_binary/meta/main.yml new file mode 100644 index 00000000000..1050c23ce30 --- /dev/null +++ 
b/test/integration/roles/test_binary/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + diff --git a/test/integration/roles/test_binary/tasks/main.yml b/test/integration/roles/test_binary/tasks/main.yml new file mode 100644 index 00000000000..486ee6d6b00 --- /dev/null +++ b/test/integration/roles/test_binary/tasks/main.yml @@ -0,0 +1,131 @@ +--- +# Various ways users want to use binary data +# Could integrate into individual modules but currently these don't all work. +# Probably easier to see them all in a single block to know what we're testing. +# When we can start testing v2 we should test that all of these work. + +# In v1: The following line will traceback if it's the first task in the role. +# Does not traceback if it's the second or third etc task. +- debug: msg="{{ utf8_simple_accents|b64decode}}" + +# Expected values of the written files +- name: get checksums that we expect later files to have + copy: + src: from_playbook + dest: "{{ output_dir }}" + +- copy: + src: b64_utf8 + dest: "{{ output_dir }}" + +- copy: + src: b64_latin1 + dest: "{{ output_dir }}" + +- stat: + path: "{{ output_dir }}/from_playbook" + register: from_playbook + +- stat: + path: "{{ output_dir }}/b64_utf8" + register: b64_utf8 + +- stat: + path: "{{ output_dir }}/b64_latin1" + register: b64_latin1 + +# Tests themselves +- name: copy with utf-8 content in a playbook + copy: + content: "{{ simple_accents }}\n" + dest: "{{ output_dir }}/from_playbook.txt" + +- name: Check that copying utf-8 content matches + stat: + path: "{{ output_dir }}/from_playbook.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == from_playbook.stat.checksum' + +- name: copy with utf8 in a base64 encoded string + copy: + content: "{{ utf8_simple_accents|b64decode }}\n" + dest: "{{ output_dir }}/b64_utf8.txt" + +- name: Check that utf8 in a base64 string matches + stat: + path: "{{ output_dir }}/b64_utf8.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == b64_utf8.stat.checksum' + +- name: copy with latin1 in a base64 encoded string + copy: + content: "{{ latin1_simple_accents|b64decode }}\n" + dest: "{{ output_dir }}/b64_latin1.txt" + +- name: Check that latin1 in a base64 string matches + stat: + path: "{{ output_dir }}/b64_latin1.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == b64_latin1.stat.checksum' + # This one depends on being able to pass binary data through + # Might be a while before we find a solution for this + ignore_errors: True + +- name: Template with a unicode string from the playbook + template: + src: "from_playbook_template.j2" + dest: "{{ output_dir }}/from_playbook_template.txt" + +- name: Check that writing a template from a playbook var matches + stat: + path: "{{ output_dir }}/from_playbook_template.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == from_playbook.stat.checksum' + +- name: Template with utf8 in a base64 encoded string + template: + src: "b64_utf8_template.j2" + dest: "{{ output_dir }}/b64_utf8_template.txt" + +- name: Check that writing a template from a base64 encoded utf8 string matches + stat: + path: "{{ output_dir }}/b64_utf8_template.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == b64_utf8.stat.checksum' + +- name: Template with latin1 in a base64 encoded string + template: + src: "b64_latin1_template.j2" + dest: "{{ output_dir }}/b64_latin1_template.txt" + +- name: Check that writing a template from a base64 encoded latin1 string matches + stat: + 
path: "{{ output_dir }}/b64_latin1_template.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == b64_latin1.stat.checksum' + # This one depends on being able to pass binary data through + # Might be a while before we find a solution for this + ignore_errors: True + +# These might give garbled output but none of them should traceback +- debug: var=simple_accents +- debug: msg="{{ utf8_simple_accents|b64decode}}" +- debug: msg="{{ latin1_simple_accents|b64decode}}" diff --git a/test/integration/roles/test_binary/templates/b64_latin1_template.j2 b/test/integration/roles/test_binary/templates/b64_latin1_template.j2 new file mode 100644 index 00000000000..ee2fc1b19c3 --- /dev/null +++ b/test/integration/roles/test_binary/templates/b64_latin1_template.j2 @@ -0,0 +1 @@ +{{ latin1_simple_accents|b64decode }} diff --git a/test/integration/roles/test_binary/templates/b64_utf8_template.j2 b/test/integration/roles/test_binary/templates/b64_utf8_template.j2 new file mode 100644 index 00000000000..9fd3ed48b18 --- /dev/null +++ b/test/integration/roles/test_binary/templates/b64_utf8_template.j2 @@ -0,0 +1 @@ +{{ utf8_simple_accents|b64decode }} diff --git a/test/integration/roles/test_binary/templates/from_playbook_template.j2 b/test/integration/roles/test_binary/templates/from_playbook_template.j2 new file mode 100644 index 00000000000..3be6dd4f0b5 --- /dev/null +++ b/test/integration/roles/test_binary/templates/from_playbook_template.j2 @@ -0,0 +1 @@ +{{ simple_accents }} diff --git a/test/integration/roles/test_binary/vars/main.yml b/test/integration/roles/test_binary/vars/main.yml new file mode 100644 index 00000000000..f6d40232c37 --- /dev/null +++ b/test/integration/roles/test_binary/vars/main.yml @@ -0,0 +1,3 @@ +simple_accents: 'Café Eñe' +utf8_simple_accents: 'Q2Fmw6kgRcOxZQ==' +latin1_simple_accents: 'Q2Fm6SBF8WU=' diff --git a/test/integration/roles/test_command_shell/tasks/main.yml b/test/integration/roles/test_command_shell/tasks/main.yml index 8a15c99957a..b331452b7c6 100644 --- a/test/integration/roles/test_command_shell/tasks/main.yml +++ b/test/integration/roles/test_command_shell/tasks/main.yml @@ -185,7 +185,7 @@ "multiline echo" \ "with a new line in quotes" \ - | md5sum \ + | sha1sum \ | tr -s ' ' \ | cut -f1 -d ' ' echo "this is a second line" @@ -197,4 +197,20 @@ assert: that: - "shell_result6.changed" - - "shell_result6.stdout == '32f3cc201b69ed8afa3902b80f554ca8\nthis is a second line'" + - "shell_result6.stdout == '5575bb6b71c9558db0b6fbbf2f19909eeb4e3b98\nthis is a second line'" + +- name: execute a shell command using a literal multiline block with arguments in it + shell: | + executable=/bin/bash + creates={{output_dir_test | expanduser}}/afile.txt + echo "test" + register: shell_result7 + +- name: assert the multiline shell command with arguments in it run as expected + assert: + that: + - "shell_result7.changed" + - "shell_result7.stdout == 'test'" + +- name: remove the previously created file + file: path={{output_dir_test}}/afile.txt state=absent diff --git a/test/integration/roles/test_conditionals/tasks/main.yml b/test/integration/roles/test_conditionals/tasks/main.yml index f2aa0068c60..01a4f960d73 100644 --- a/test/integration/roles/test_conditionals/tasks/main.yml +++ b/test/integration/roles/test_conditionals/tasks/main.yml @@ -267,3 +267,33 @@ that: - "result.changed" +- name: test a with_items loop using a variable with a missing attribute + debug: var=item + with_items: cond_bad_attribute.results + when: cond_bad_attribute is defined and 
diff --git a/test/integration/roles/test_command_shell/tasks/main.yml index 8a15c99957a..b331452b7c6 100644 --- a/test/integration/roles/test_command_shell/tasks/main.yml +++ b/test/integration/roles/test_command_shell/tasks/main.yml @@ -185,7 +185,7 @@ "multiline echo" \ "with a new line in quotes" \ - | md5sum \ + | sha1sum \ | tr -s ' ' \ | cut -f1 -d ' ' echo "this is a second line" @@ -197,4 +197,20 @@ assert: that: - "shell_result6.changed" - - "shell_result6.stdout == '32f3cc201b69ed8afa3902b80f554ca8\nthis is a second line'" + - "shell_result6.stdout == '5575bb6b71c9558db0b6fbbf2f19909eeb4e3b98\nthis is a second line'" + +- name: execute a shell command using a literal multiline block with arguments in it + shell: | + executable=/bin/bash + creates={{output_dir_test | expanduser}}/afile.txt + echo "test" + register: shell_result7 + +- name: assert the multiline shell command with arguments in it ran as expected + assert: + that: + - "shell_result7.changed" + - "shell_result7.stdout == 'test'" + +- name: remove the previously created file + file: path={{output_dir_test}}/afile.txt state=absent diff --git a/test/integration/roles/test_conditionals/tasks/main.yml index f2aa0068c60..01a4f960d73 100644 --- a/test/integration/roles/test_conditionals/tasks/main.yml +++ b/test/integration/roles/test_conditionals/tasks/main.yml @@ -267,3 +267,33 @@ that: - "result.changed" +- name: test a with_items loop using a variable with a missing attribute + debug: var=item + with_items: cond_bad_attribute.results + when: cond_bad_attribute is defined and 'results' in cond_bad_attribute + register: result + +- name: assert the task was skipped + assert: + that: + - "result.results|length == 1" + - "'skipped' in result.results[0]" + - "result.results[0].skipped == True" + +- name: test a with_items loop skipping a single item + debug: var=item + with_items: cond_list_of_items.results + when: item != 'b' + register: result + +- debug: var=result + +- name: assert only a single item was skipped + assert: + that: + - result.results|length == 3 + - result.results[1].skipped + +- name: test complex templated condition + debug: msg="it works" + when: vars_file_var in things1|union([vars_file_var]) diff --git a/test/integration/roles/test_conditionals/vars/main.yml new file mode 100644 index 00000000000..a6ecf62f534 --- /dev/null +++ b/test/integration/roles/test_conditionals/vars/main.yml @@ -0,0 +1,13 @@ +--- +# foo is a dictionary that will be used to check that +# a conditional passes a with_items loop on a variable +# with a missing attribute (i.e. foo.results) +cond_bad_attribute: + bar: a + +cond_list_of_items: + results: + - a + - b + - c + diff --git a/test/integration/roles/test_consul_acl/tasks/main.yml b/test/integration/roles/test_consul_acl/tasks/main.yml new file mode 100644 index 00000000000..1448c63741b --- /dev/null +++ b/test/integration/roles/test_consul_acl/tasks/main.yml @@ -0,0 +1,42 @@ +- name: create a new acl token + consul_acl: + mgmt_token: '{{mgmt_token}}' + host: '{{acl_host}}' + name: 'New ACL' + register: new_ruleless + +- name: verify ruleless key created + assert: + that: + - new_ruleless.token | length == 36 + - new_ruleless.name == 'New ACL' + +- name: add rules to an acl token + consul_acl: + mgmt_token: '{{mgmt_token}}' + host: '{{acl_host}}' + name: 'With rule' + rules: + - key: 'foo' + policy: read + - key: 'private/foo' + policy: deny + register: with_rules + +- name: verify rules created + assert: + that: + - with_rules.token | length == 36 + - with_rules.name == 'With rule' + - with_rules.rules | match('.*"foo".*') + - with_rules.rules | match('.*"private/foo".*') + +- name: clean up + consul_acl: + mgmt_token: '{{mgmt_token}}' + host: '{{acl_host}}' + token: '{{item}}' + state: absent + with_items: + - '{{new_ruleless.token}}' + - '{{with_rules.token}}' diff --git a/test/integration/roles/test_consul_inventory/tasks/main.yml b/test/integration/roles/test_consul_inventory/tasks/main.yml new file mode 100644 index 00000000000..f65de2fae00 --- /dev/null +++ b/test/integration/roles/test_consul_inventory/tasks/main.yml @@ -0,0 +1,39 @@ +- name: there are three hosts with an available consul service + assert: + that: + - groups.consul_servers | length == 3 + + +- name: there is one host with an available ssh service + assert: + that: + - groups.ssh_up | length == 1 + +- name: there is one host with a failing nginx service + assert: + that: + - groups.nginx_down | length == 1 + +- name: services get added to groups with their tags + assert: + that: + - groups.nginx_servers_qa | length == 1 + - groups.nginx_servers_slave | length == 1 + +- name: metadata from the kv store gets added to the facts for a host + assert: + that: + - clearance | match('top_secret') + when: inventory_hostname == '11.0.0.2' + +- name: extra groups a host should be added to can be loaded from kv + assert: + that: + - groups.a_group | length == 1 + - groups.another_group | length == 1 + +- name: ansible_ssh_port is set if the ssh service is registered + assert: + that: + -
ansible_ssh_port == 2222 + when: not inventory_hostname in ['11.0.0.2', '11.0.0.3', '11.0.0.4'] diff --git a/test/integration/roles/test_consul_kv/tasks/main.yml b/test/integration/roles/test_consul_kv/tasks/main.yml new file mode 100644 index 00000000000..4aff909418b --- /dev/null +++ b/test/integration/roles/test_consul_kv/tasks/main.yml @@ -0,0 +1,90 @@ +- name: add rules to an acl token + consul_acl: + mgmt_token: '{{mgmt_token}}' + host: '{{acl_host}}' + name: 'ACL rule for testing' + rules: + - key: 'somekey' + policy: all + register: test_acl + +- name: cleanup from previous failed runs + consul_kv: key={{item}} state=absent token='{{test_acl.token}}' + with_items: + - somekey + +- name: add a kv pair to the kv store + consul_kv: key=somekey value=somevalue token='{{test_acl.token}}' + register: new_key + +- name: verify new key + assert: + that: + - new_key.key == 'somekey' + - new_key.data.Value == 'somevalue' + - new_key.changed == true + +- name: add an existing kv to the kv store + consul_kv: key=somekey value=somevalue token='{{test_acl.token}}' + register: existing_key + +- name: verify existing key causes no change + assert: + that: + - existing_key.key == 'somekey' + - existing_key.data.Value == 'somevalue' + - existing_key.changed == False + +- name: remove an existing kv from the kv store + consul_kv: key=somekey state=absent token='{{test_acl.token}}' + register: remove_key + +- name: verify removal causes change and existing value is returned + assert: + that: + - remove_key.key == 'somekey' + - remove_key.data.Value == 'somevalue' + - remove_key.changed == True + +- name: attempting to remove a non-existent kv from the kv store causes no change + consul_kv: key=not_present state=absent token='{{test_acl.token}}' + register: nonexistent_key + +- name: verify removal of a non-existent key causes no change and returns no value + assert: + that: + - nonexistent_key.key == 'not_present' + - nonexistent_key.data == None + - nonexistent_key.changed == False + +- name: Add a key to lookup with the lookup capability + consul_kv: key='key/to/lookup_{{item}}' value='somevalue_{{item}}' token='{{test_acl.token}}' + with_items: + - one + - two + register: lookup_keys + + # necessary to make the new token available to the with_consul_kv lookups below +- set_fact: acl_token={{test_acl.token}} + +- name: kv test + assert: + that: + - "{{item | match('somevalue_one')}}" + with_consul_kv: + - 'key/to/lookup_one token={{acl_token}}' + + +- name: recursive kv lookup test + assert: + that: + - "{{item| match('somevalue_(one|two)')}}" + with_consul_kv: + - 'key/to recurse=true token={{acl_token}}' + +- name: remove test acl rule + consul_acl: + mgmt_token: '{{mgmt_token}}' + host: '{{acl_host}}' + token: '{{test_acl.token}}' + state: absent
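The with_consul_kv lookups above go through the same HTTP API that the python-consul client used elsewhere in this patch exposes; a rough equivalent of the single-key lookup, handy when a run fails (a debugging sketch, not part of the tests; pass token=... if the key is ACL-protected):

    import consul

    c = consul.Consul()  # local agent on the default port 8500
    index, data = c.kv.get('key/to/lookup_one')
    if data is not None:
        print(data['Value'])  # 'somevalue_one'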
diff --git a/test/integration/roles/test_consul_service/tasks/main.yml b/test/integration/roles/test_consul_service/tasks/main.yml new file mode 100644 index 00000000000..dff2252c107 --- /dev/null +++ b/test/integration/roles/test_consul_service/tasks/main.yml @@ -0,0 +1,156 @@ +- name: cleanup any previous failed runs + consul: + service_id: '{{item}}' + state: absent + with_items: + - service1 + - service2 + - with_check + - with_tags + +- name: register very basic service with consul gets default id + consul: + service_name: service1 + service_port: 80 + register: basic_result + +- name: verify basic service registration + assert: + that: + - basic_result.changed + - basic_result.service_port == 80 + - basic_result.service_id == 'service1' + - basic_result.service_name == 'service1' + +- name: register very basic service with explicit id + consul: + service_name: Basic Service + service_id: service2 + service_port: 80 + register: basic2_result + +- name: verify service2 registration + assert: + that: + - basic2_result.changed + - basic2_result.service_port == 80 + - basic2_result.service_id == 'service2' + - basic2_result.service_name == 'Basic Service' + +- name: register very basic service with check script + consul: + service_name: with_check + service_port: 80 + script: "sh -c true" + interval: 60 + register: script_result + +- name: verify service with check registration + assert: + that: + - script_result.changed + - script_result.service_port == 80 + - script_result.service_id == 'with_check' + - script_result.checks | length == 1 + +- name: register service with some service tags + consul: + service_name: with_tags + service_port: 80 + tags: + - prod + - webservers + register: tags_result + + +- name: verify tags registration + assert: + that: + - tags_result.changed + - "tags_result.tags == ['prod', 'webservers']" + +- name: create a node level check + consul: + check_name: Node Level Check + check_id: node_level + script: "sh -c true" + interval: 50m + register: nodelevel_result + +- name: verify node level check registration + assert: + that: + - nodelevel_result.changed + - nodelevel_result.check_name == 'Node Level Check' + - nodelevel_result.check_id == 'node_level' + - nodelevel_result.script == 'sh -c true' + - nodelevel_result.interval == '50m' + +- name: remove a service + consul: + service_id: 'service1' + state: absent + register: delete_service_result + +- name: verify service removal + assert: + that: + - delete_service_result.changed + +- name: removal of a non-existent service causes no change + consul: + service_id: 'service1' + state: absent + register: delete_service_result + +- name: verify service removal caused no change + assert: + that: + - not delete_service_result.changed + +- name: remove a check + consul: + check_id: 'node_level' + state: absent + register: delete_check_result + +- name: verify check removal + assert: + that: + - delete_check_result.changed + +- name: removal of a non-existent check causes no change + consul: + check_id: 'node_level' + state: absent + register: delete_check_result + +- name: verify check removal caused no change + assert: + that: + - not delete_check_result.changed + +- name: add service to test removal by name + consul: + service_name: by_name + service_port: 12345 + +- name: remove service by name + consul: + service_name: by_name + state: absent + register: delete_service_by_name_result + +- name: verify service removal + assert: + that: + - delete_service_by_name_result.changed + +- name: cleanup + consul: + service_id: '{{item}}' + state: absent + with_items: + - service2 + - with_check + - with_tags
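When one of these registrations misbehaves, it helps to see what the local agent actually holds; the same python-consul client can dump it (a debugging sketch, not part of the tests):

    import consul

    agent = consul.Consul().agent
    print(agent.services())  # e.g. keys 'service2', 'with_check', 'with_tags'
    print(agent.checks())    # health checks, keyed by check id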
diff --git a/test/integration/roles/test_consul_session/tasks/main.yml b/test/integration/roles/test_consul_session/tasks/main.yml new file mode 100644 index 00000000000..7e556aa6905 --- /dev/null +++ b/test/integration/roles/test_consul_session/tasks/main.yml @@ -0,0 +1,77 @@ +- name: register basic session with consul + consul_session: + name: session1 + register: basic_result + +- name: verify basic session registration + assert: + that: + - basic_result.changed + - basic_result.session_id | length == 36 + - basic_result.name == 'session1' + +- name: add checks for session health check + consul: + check_name: session_check + script: /usr/bin/true + interval: 15 + +- name: register a session with check + consul_session: + name: session_with_check + checks: + - session_check + register: with_check + +- name: verify session with check registration + assert: + that: + - with_check.changed + - with_check.session_id | length == 36 + - with_check.name == 'session_with_check' + - with_check.checks == ['session_check'] + +- name: register a session with lock_delay + consul_session: + name: session_with_delay + delay: 20 + register: with_delay + +- name: verify registration of session with delay + assert: + that: + - with_delay.changed + - with_delay.session_id | length == 36 + - with_delay.name == 'session_with_delay' + - with_delay.delay == 20 + + +- name: retrieve session by id + consul_session: id='{{with_delay.session_id}}' state=info + register: retrieved_by_id + +- name: verify retrieval by id + assert: + that: + - with_delay.session_id == retrieved_by_id.sessions[1].ID + +- name: retrieve all sessions + consul_session: state=list + register: retrieved_by_list + +- name: verify retrieval by list + assert: + that: + - 3 <= retrieved_by_list.sessions[0] + +- name: remove sessions + consul_session: id={{item}} state=absent + with_items: + - '{{ basic_result.session_id }}' + - '{{ with_check.session_id }}' + - '{{ with_delay.session_id }}' + +- name: remove check + consul: + check_name: session_check + state: absent diff --git a/test/integration/roles/test_copy/tasks/main.yml index 8c4892bea80..5e77295fbb3 100644 --- a/test/integration/roles/test_copy/tasks/main.yml +++ b/test/integration/roles/test_copy/tasks/main.yml @@ -40,6 +40,7 @@ - "'group' in copy_result" - "'gid' in copy_result" - "'md5sum' in copy_result" + - "'checksum' in copy_result" - "'owner' in copy_result" - "'size' in copy_result" - "'src' in copy_result" @@ -51,10 +52,16 @@ that: - "copy_result.changed == true" -- name: verify that the file md5sum is correct - assert: - that: +- name: verify that the file checksums are correct + assert: + that: + - "copy_result.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'" + +- name: verify that the legacy md5sum is correct + assert: + that: - "copy_result.md5sum == 'c47397529fe81ab62ba3f85e9f4c71f2'" + when: ansible_fips != True - name: check the stat results of the file stat: path={{output_file}} @@ -70,7 +77,13 @@ - "stat_results.stat.isfifo == false" - "stat_results.stat.isreg == true" - "stat_results.stat.issock == false" + - "stat_results.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'" + +- name: verify that the legacy md5sum is correct + assert: + that: - "stat_results.stat.md5 == 'c47397529fe81ab62ba3f85e9f4c71f2'" + when: ansible_fips != True - name: overwrite the file via same means copy: src=foo.txt dest={{output_file}} @@ -180,7 +193,7 @@ that: - "copy_result6.changed" - "copy_result6.dest == '{{output_dir|expanduser}}/multiline.txt'" - - "copy_result6.md5sum == '1627d51e7e607c92cf1a502bf0c6cce3'" + - "copy_result6.checksum == '9cd0697c6a9ff6689f0afb9136fa62e0b3fee903'" # test overwriting a file as an unprivileged user (pull request #8624) # this can't be relative to {{output_dir}} as ~root usually has mode 700 @@ -202,8 +215,38 @@ that: - "copy_result7.changed" - "copy_result7.dest == '/tmp/worldwritable/file.txt'" - - "copy_result7.md5sum == '73feffa4b7f6bb68e44cf984c85f6e88'" + - "copy_result7.checksum == 'bbe960a25ea311d21d40669e93df2003ba9b90a2'" - name: clean up file: dest=/tmp/worldwritable state=absent +# test overwriting a link using "follow=yes" so that the link +# is preserved and the link target is updated + +- name:
create a test file to symlink to + copy: dest={{output_dir}}/follow_test content="this is the follow test file\n" + +- name: create a symlink to the test file + file: path={{output_dir}}/follow_link src='./follow_test' state=link + +- name: update the test file using follow=True to preserve the link + copy: dest={{output_dir}}/follow_link content="this is the new content\n" follow=yes + register: replace_follow_result + +- name: stat the link path + stat: path={{output_dir}}/follow_link + register: stat_link_result + +- name: assert that the link is still a link + assert: + that: + - stat_link_result.stat.islnk + +- name: get the checksum of the link target + shell: sha1sum {{output_dir}}/follow_test | cut -f1 -sd ' ' + register: target_file_result + +- name: assert that the link target was updated + assert: + that: + - replace_follow_result.checksum == target_file_result.stdout diff --git a/test/integration/roles/test_embedded_module/library/test_integration_module b/test/integration/roles/test_embedded_module/library/test_integration_module new file mode 100644 index 00000000000..5af29b4c019 --- /dev/null +++ b/test/integration/roles/test_embedded_module/library/test_integration_module @@ -0,0 +1,3 @@ +#!/usr/bin/env python + +print '{"changed":false, "msg":"this is the embedded module"}' diff --git a/test/integration/roles/test_embedded_module/tasks/main.yml b/test/integration/roles/test_embedded_module/tasks/main.yml new file mode 100644 index 00000000000..6a6d6485fc3 --- /dev/null +++ b/test/integration/roles/test_embedded_module/tasks/main.yml @@ -0,0 +1,9 @@ +- name: run the embedded dummy module + test_integration_module: + register: result + +- name: assert the embedded module ran + assert: + that: + - "'msg' in result" + - result.msg == "this is the embedded module" diff --git a/test/integration/roles/test_file/tasks/main.yml b/test/integration/roles/test_file/tasks/main.yml index 7c8262c27da..d03ded13b6e 100644 --- a/test/integration/roles/test_file/tasks/main.yml +++ b/test/integration/roles/test_file/tasks/main.yml @@ -65,6 +65,29 @@ - name: change ownership and group file: path={{output_dir}}/baz.txt owner=1234 group=1234 +- name: setup a tmp-like directory for ownership test + file: path=/tmp/worldwritable mode=1777 state=directory + +- name: Ask to create a file without enough perms to change ownership + file: path=/tmp/worldwritable/baz.txt state=touch owner=root + sudo: yes + sudo_user: nobody + register: chown_result + ignore_errors: True + +- name: Ask whether the new file exists + stat: path=/tmp/worldwritable/baz.txt + register: file_exists_result + +- name: Verify that the file doesn't exist on failure + assert: + that: + - "chown_result.failed == True" + - "file_exists_result.stat.exists == False" + +- name: clean up + file: path=/tmp/worldwritable state=absent + - name: create soft link to file file: src={{output_file}} dest={{output_dir}}/soft.txt state=link register: file5_result @@ -83,6 +106,15 @@ that: - "file6_result.changed == true" +- name: touch a hard link + file: src={{output_file}} dest={{output_dir}}/hard.txt state=touch + register: file6_touch_result + +- name: verify that the hard link was touched + assert: + that: + - "file6_touch_result.changed == true" + - name: create a directory file: path={{output_dir}}/foobar state=directory register: file7_result @@ -165,7 +197,7 @@ - "file11_result.uid == 1235" - name: fail to create soft link to non existent file - file: src=/noneexistant dest={{output_dir}}/soft2.txt state=link force=no + file: 
src=/noneexistent dest={{output_dir}}/soft2.txt state=link force=no register: file12_result ignore_errors: true @@ -175,7 +207,7 @@ - "file12_result.failed == true" - name: force creation soft link to non existent - file: src=/noneexistant dest={{output_dir}}/soft2.txt state=link force=yes + file: src=/noneexistent dest={{output_dir}}/soft2.txt state=link force=yes register: file13_result - name: verify that link was created @@ -229,6 +261,43 @@ - 'file17_result.failed == true' - 'file17_result.state == "directory"' +- name: create soft link to directory using absolute path + file: src=/ dest={{output_dir}}/root state=link + register: file18_result + +- name: verify that the result was marked as changed + assert: + that: + - "file18_result.changed == true" + +- name: create another test sub-directory + file: dest={{output_dir}}/sub2 state=directory + register: file19_result + +- name: verify that the new directory was created + assert: + that: + - 'file19_result.changed == true' + - 'file19_result.state == "directory"' + +- name: create soft link to relative file + file: src=../sub1/file1 dest={{output_dir}}/sub2/link1 state=link + register: file20_result + +- name: verify that the result was marked as changed + assert: + that: + - "file20_result.changed == true" + +- name: create soft link to relative directory + file: src=sub1 dest={{output_dir}}/sub1-link state=link + register: file21_result + +- name: verify that the result was marked as changed + assert: + that: + - "file21_result.changed == true" + - name: test file creation with symbolic mode file: dest={{output_dir}}/test_symbolic state=touch mode=u=rwx,g=rwx,o=rwx register: result @@ -382,3 +451,29 @@ that: - result.mode == '0444' +# test the file module using follow=yes, so that the target of a +# symlink is modified, rather than the link itself + +- name: create a test file + copy: dest={{output_dir}}/test_follow content="this is a test file\n" mode=0666 + +- name: create a symlink to the test file + file: path={{output_dir}}/test_follow_link src="./test_follow" state=link + +- name: modify the permissions on the link using follow=yes + file: path={{output_dir}}/test_follow_link mode=0644 follow=yes + register: result + +- name: assert that the chmod worked + assert: + that: + - result.changed + +- name: stat the link target + stat: path={{output_dir}}/test_follow + register: result + +- name: assert that the link target was modified correctly + assert: + that: + - result.stat.mode == '0644' diff --git a/test/integration/roles/test_filters/files/foo.txt b/test/integration/roles/test_filters/files/foo.txt index 5ee5a5812c8..c5af545d3a9 100644 --- a/test/integration/roles/test_filters/files/foo.txt +++ b/test/integration/roles/test_filters/files/foo.txt @@ -1,17 +1,6 @@ This is a test of various filter plugins found in Ansible (ex: core.py), and not so much a test of the core filters in Jinja2. 
-Dumping a nested structure to JSON - -[ - "this is a list element", - { - "this": "is a hash element in a list", - "warp": 9, - "where": "endor" - } -] - Dumping the same structure to YAML - this is a list element diff --git a/test/integration/roles/test_filters/tasks/main.yml b/test/integration/roles/test_filters/tasks/main.yml index 985cbf83275..3d1ee322e30 100644 --- a/test/integration/roles/test_filters/tasks/main.yml +++ b/test/integration/roles/test_filters/tasks/main.yml @@ -22,6 +22,9 @@ - debug: var=some_registered_var +- name: Verify that we work around a py26 json bug + template: src=py26json.j2 dest={{output_dir}}/py26json.templated mode=0644 + - name: fill in a basic template template: src=foo.j2 dest={{output_dir}}/foo.templated mode=0644 register: template_result diff --git a/test/integration/roles/test_filters/templates/foo.j2 b/test/integration/roles/test_filters/templates/foo.j2 index 6d1dde20c22..cf592f98cc2 100644 --- a/test/integration/roles/test_filters/templates/foo.j2 +++ b/test/integration/roles/test_filters/templates/foo.j2 @@ -1,10 +1,6 @@ This is a test of various filter plugins found in Ansible (ex: core.py), and not so much a test of the core filters in Jinja2. -Dumping a nested structure to JSON - -{{ some_structure | to_nice_json }} - Dumping the same structure to YAML {{ some_structure | to_nice_yaml }} diff --git a/test/integration/roles/test_filters/templates/py26json.j2 b/test/integration/roles/test_filters/templates/py26json.j2 new file mode 100644 index 00000000000..b87d3c8090b --- /dev/null +++ b/test/integration/roles/test_filters/templates/py26json.j2 @@ -0,0 +1,2 @@ +Provoke a python2.6 json bug +{{ hostvars | to_nice_json }} diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml index 34f2879c5b8..4bdc1d8bd87 100644 --- a/test/integration/roles/test_git/tasks/main.yml +++ b/test/integration/roles/test_git/tasks/main.yml @@ -22,6 +22,11 @@ repo_format1: 'https://github.com/jimi-c/test_role' repo_format2: 'git@github.com:jimi-c/test_role.git' repo_format3: 'ssh://git@github.com/jimi-c/test_role.git' + repo_submodules: 'https://github.com/abadger/test_submodules.git' + repo_submodules_newer: 'https://github.com/abadger/test_submodules_newer.git' + repo_submodule1: 'https://github.com/abadger/test_submodules_subm1.git' + repo_submodule1_newer: 'https://github.com/abadger/test_submodules_subm1_newer.git' + repo_submodule2: 'https://github.com/abadger/test_submodules_subm2.git' known_host_files: - "{{ lookup('env','HOME') }}/.ssh/known_hosts" - '/etc/ssh/ssh_known_hosts' @@ -134,3 +139,215 @@ that: - 'git_result.changed' when: not git_result|skipped + +# Test a non-updating repo query with no destination specified + +- name: get info on a repo without updating and with no destination specified + git: + repo: '{{ repo_format1 }}' + update: no + clone: no + accept_hostkey: yes + register: git_result + +- assert: + that: + - 'git_result.changed' + +# Test that a specific revision can be checked out + +- name: clear checkout_dir + file: state=absent path={{ checkout_dir }} + +- name: clone to specific revision + git: + repo: "{{ repo_format1 }}" + dest: "{{ checkout_dir }}" + version: df4612ba925fbc1b3c51cbb006f51a0443bd2ce9 + +- name: check HEAD after clone to revision + command: git rev-parse HEAD chdir="{{ checkout_dir }}" + register: git_result + +- assert: + that: + - 'git_result.stdout == "df4612ba925fbc1b3c51cbb006f51a0443bd2ce9"' + +- name: update to specific revision + git: + repo: "{{ 
repo_format1 }}" + dest: "{{ checkout_dir }}" + version: 4e739a34719654db7b04896966e2354e1256ea5d + register: git_result + +- assert: + that: + - 'git_result.changed' + +- name: check HEAD after update to revision + command: git rev-parse HEAD chdir="{{ checkout_dir }}" + register: git_result + +- assert: + that: + - 'git_result.stdout == "4e739a34719654db7b04896966e2354e1256ea5d"' + +# Test a revision not available under refs/heads/ or refs/tags/ + +- name: attempt to get unavailable revision + git: + repo: "{{ repo_format1 }}" + dest: "{{ checkout_dir }}" + version: 2cfde3668b8bb10fbe2b9d5cec486025ad8cc51b + ignore_errors: true + register: git_result + +- assert: + that: + - 'git_result.failed' + +# Same as the previous test, but this time we specify which ref +# contains the SHA1 +- name: update to revision by specifying the refspec + git: + repo: https://github.com/ansible/ansible-examples.git + dest: '{{ checkout_dir }}' + version: 2cfde3668b8bb10fbe2b9d5cec486025ad8cc51b + refspec: refs/pull/7/merge + +- name: check HEAD after update with refspec + command: git rev-parse HEAD chdir="{{ checkout_dir }}" + register: git_result + +- assert: + that: + - 'git_result.stdout == "2cfde3668b8bb10fbe2b9d5cec486025ad8cc51b"' + +- name: clear checkout_dir + file: state=absent path={{ checkout_dir }} + +- name: clone to revision by specifying the refspec + git: + repo: https://github.com/ansible/ansible-examples.git + dest: '{{ checkout_dir }}' + version: 2cfde3668b8bb10fbe2b9d5cec486025ad8cc51b + refspec: refs/pull/7/merge + +- name: check HEAD after update with refspec + command: git rev-parse HEAD chdir="{{ checkout_dir }}" + register: git_result + +- assert: + that: + - 'git_result.stdout == "2cfde3668b8bb10fbe2b9d5cec486025ad8cc51b"' + +# +# Submodule tests +# + +# Repository A with submodules defined (repo_submodules) +# .gitmodules file points to Repository I +# Repository B forked from A that has newer commits (repo_submodules_newer) +# .gitmodules file points to Repository II instead of I +# .gitmodules file also points to Repository III +# Repository I for submodule1 (repo_submodule1) +# Has 1 file checked in +# Repository II forked from I that has newer commits (repo_submodule1_newer) +# Has 2 files checked in +# Repository III for a second submodule (repo_submodule2) +# Has 1 file checked in + +- name: clear checkout_dir + file: state=absent path={{ checkout_dir }} + +- name: Test that clone without recursive does not retrieve submodules + git: + repo: '{{ repo_submodules }}' + dest: '{{ checkout_dir }}' + recursive: no + +- command: 'ls -1a {{ checkout_dir }}/submodule1' + register: submodule1 + +- assert: + that: '{{ submodule1.stdout_lines|length }} == 2' + +- name: clear checkout_dir + file: state=absent path={{ checkout_dir }} + + +- name: Test that clone with recursive retrieves submodules + git: + repo: '{{ repo_submodules }}' + dest: '{{ checkout_dir }}' + recursive: yes + +- command: 'ls -1a {{ checkout_dir }}/submodule1' + register: submodule1 + +- assert: + that: '{{ submodule1.stdout_lines|length }} == 4' + +- name: Copy the checkout so we can run several different tests on it + command: 'cp -pr {{ checkout_dir }} {{ checkout_dir }}.bak' + + + +- name: Test that update without recursive does not change submodules + command: 'git config --replace-all remote.origin.url {{ repo_submodules_newer }}' + args: + chdir: '{{ checkout_dir }}' + +- git: + repo: '{{ repo_submodules_newer }}' + dest: '{{ checkout_dir }}' + recursive: no + update: yes + track_submodules: yes + +- 
command: 'ls -1a {{ checkout_dir }}/submodule1' + register: submodule1 + +- stat: + path: '{{ checkout_dir }}/submodule2' + register: submodule2 + +- command: 'ls -1a {{ checkout_dir }}/submodule2' + register: submodule2 + +- assert: + that: '{{ submodule1.stdout_lines|length }} == 4' +- assert: + that: '{{ submodule2.stdout_lines|length }} == 2' + + + +- name: Restore checkout to prior state + file: state=absent path={{ checkout_dir }} +- command: 'cp -pr {{ checkout_dir }}.bak {{ checkout_dir }}' + +- name: Test that update with recursive updated existing submodules + command: 'git config --replace-all remote.origin.url {{ repo_submodules_newer }}' + args: + chdir: '{{ checkout_dir }}' + +- git: + repo: '{{ repo_submodules_newer }}' + dest: '{{ checkout_dir }}' + update: yes + recursive: yes + track_submodules: yes + +- command: 'ls -1a {{ checkout_dir }}/submodule1' + register: submodule1 + +- assert: + that: '{{ submodule1.stdout_lines|length }} == 5' + + +- name: Test that update with recursive found new submodules + command: 'ls -1a {{ checkout_dir }}/submodule2' + register: submodule2 + +- assert: + that: '{{ submodule2.stdout_lines|length }} == 4' diff --git a/test/integration/roles/test_lineinfile/tasks/main.yml b/test/integration/roles/test_lineinfile/tasks/main.yml index 34c9db6f4f5..d809bf1983e 100644 --- a/test/integration/roles/test_lineinfile/tasks/main.yml +++ b/test/integration/roles/test_lineinfile/tasks/main.yml @@ -24,7 +24,7 @@ assert: that: - "result.changed == true" - - "result.md5sum == '6be7fb7fa7fb758c80a6dc0722979c40'" + - "result.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'" - "result.state == 'file'" - name: insert a line at the beginning of the file, and back it up @@ -42,19 +42,19 @@ stat: path={{result.backup}} register: result -- name: assert the backup file matches the previous md5 +- name: assert the backup file matches the previous hash assert: that: - - "result.stat.md5 == '6be7fb7fa7fb758c80a6dc0722979c40'" + - "result.stat.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'" - name: stat the test after the insert at the head stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after the insert at the head +- name: assert test hash is what we expect for the file with the insert at the head assert: that: - - "result.stat.md5 == '07c16434644a2a3cc1807c685917443a'" + - "result.stat.checksum == '7eade4042b23b800958fe807b5bfc29f8541ec09'" - name: insert a line at the end of the file lineinfile: dest={{output_dir}}/test.txt state=present line="New line at the end" insertafter="EOF" @@ -70,10 +70,10 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after the insert at the end +- name: assert test checksum matches after the insert at the end assert: that: - - "result.stat.md5 == 'da4c2150e5782fcede1840280ab87eff'" + - "result.stat.checksum == 'fb57af7dc10a1006061b000f1f04c38e4bef50a9'" - name: insert a line after the first line lineinfile: dest={{output_dir}}/test.txt state=present line="New line after line 1" insertafter="^This is line 1$" @@ -89,10 +89,10 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after the insert after the first line +- name: assert test checksum matches after the insert after the first line assert: that: - - "result.stat.md5 == '196722c8faaa28b960bee66fa4cce58c'" + - "result.stat.checksum == '5348da605b1bc93dbadf3a16474cdf22ef975bec'" - name: insert a line before the last line lineinfile: 
dest={{output_dir}}/test.txt state=present line="New line after line 5" insertbefore="^This is line 5$" @@ -108,10 +108,10 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after the insert before the last line +- name: assert test checksum matches after the insert before the last line assert: that: - - "result.stat.md5 == 'd5955ee042139dfef16dbe3a7334475f'" + - "result.stat.checksum == 'e1cae425403507feea4b55bb30a74decfdd4a23e'" - name: replace a line with backrefs lineinfile: dest={{output_dir}}/test.txt state=present line="This is line 3" backrefs=yes regexp="^(REF) .* \\1$" @@ -127,16 +127,16 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after backref line was replaced +- name: assert test checksum matches after backref line was replaced assert: that: - - "result.stat.md5 == '0f585270054e17be242743dd31c6f593'" + - "result.stat.checksum == '2ccdf45d20298f9eaece73b713648e5489a52444'" - name: remove the middle line lineinfile: dest={{output_dir}}/test.txt state=absent regexp="^This is line 3$" register: result -- name: assert that the line was inserted at the head of the file +- name: assert that the line was removed assert: that: - "result.changed == true" @@ -146,10 +146,10 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after the middle line was removed +- name: assert test checksum matches after the middle line was removed assert: that: - - "result.stat.md5 == '661603660051991b79429c2dc68d9a67'" + - "result.stat.checksum == 'a6ba6865547c19d4c203c38a35e728d6d1942c75'" - name: run a validation script that succeeds lineinfile: dest={{output_dir}}/test.txt state=absent regexp="^This is line 5$" validate="true %s" @@ -165,10 +165,10 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after the validation succeeded +- name: assert test checksum matches after the validation succeeded assert: that: - - "result.stat.md5 == '9af984939bd859f7794661e501b4f1a4'" + - "result.stat.checksum == '76955a4516a00a38aad8427afc9ee3e361024ba5'" - name: run a validation script that fails lineinfile: dest={{output_dir}}/test.txt state=absent regexp="^This is line 1$" validate="/bin/false %s" @@ -184,10 +184,10 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches the previous after the validation failed +- name: assert test checksum matches the previous after the validation failed assert: that: - - "result.stat.md5 == '9af984939bd859f7794661e501b4f1a4'" + - "result.stat.checksum == '76955a4516a00a38aad8427afc9ee3e361024ba5'" - name: use create=yes lineinfile: dest={{output_dir}}/new_test.txt create=yes insertbefore=BOF state=present line="This is a new file" @@ -204,10 +204,10 @@ register: result ignore_errors: yes -- name: assert the newly created test md5 matches +- name: assert the newly created test checksum matches assert: that: - - "result.stat.md5 == 'fef1d487711facfd7aa2c87d788c19d9'" + - "result.stat.checksum == '038f10f9e31202451b093163e81e06fbac0c6f3a'" # Test EOF in cases where file has no newline at EOF - name: testnoeof deploy the file for lineinfile @@ -238,12 +238,12 @@ stat: path={{output_dir}}/testnoeof.txt register: result -- name: testnoeof assert test md5 matches after the insert at the end +- name: testnoeof assert test checksum matches after the insert at the end assert: that: - - "result.stat.md5 == 'f75c9d51f45afd7295000e63ce655220'" + - "result.stat.checksum == 
'f9af7008e3cb67575ce653d094c79cabebf6e523'" -# Test EOF with empty file to make sure no unneccessary newline is added +# Test EOF with empty file to make sure no unnecessary newline is added - name: testempty deploy the testempty file for lineinfile copy: src=testempty.txt dest={{output_dir}}/testempty.txt register: result @@ -262,18 +262,18 @@ stat: path={{output_dir}}/testempty.txt register: result -- name: testempty assert test md5 matches after the insert at the end +- name: testempty assert test checksum matches after the insert at the end assert: that: - - "result.stat.md5 == '357dcbee8dfb4436f63bab00a235c45a'" + - "result.stat.checksum == 'f440dc65ea9cec3fd496c1479ddf937e1b949412'" - stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after insert the multiple lines +- name: assert test checksum matches after inserting multiple lines assert: that: - - "result.stat.md5 == 'c2510d5bc8fdef8e752b8f8e74c784c2'" + - "result.stat.checksum == 'bf5b711f8f0509355aaeb9d0d61e3e82337c1365'" - name: replace a line with backrefs included in the line lineinfile: dest={{output_dir}}/test.txt state=present line="New \\1 created with the backref" backrefs=yes regexp="^This is (line 4)$" @@ -289,10 +289,10 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after backref line was replaced +- name: assert test checksum matches after backref line was replaced assert: that: - - "result.stat.md5 == '65f955c2a9722fd43d07103d7756ff9b'" + - "result.stat.checksum == '04b7a54d0fb233a4e26c9e625325bb4874841b3c'" ################################################################### # issue 8535 @@ -332,9 +332,27 @@ stat: path={{output_dir}}/test_quoting.txt register: result -- name: assert test md5 matches after backref line was replaced +- name: assert test checksum matches after backref line was replaced assert: that: - - "result.stat.md5 == '29f349baf1b9c6703beeb346fe8dc669'" + - "result.stat.checksum == '7dc3cb033c3971e73af0eaed6623d4e71e5743f1'" + +- name: insert a line into the quoted file with a single quote + lineinfile: dest={{output_dir}}/test_quoting.txt line="import g'" + register: result + +- name: assert that the quoted file was changed + assert: + that: + - result.changed + +- name: stat the quote test file + stat: path={{output_dir}}/test_quoting.txt + register: result + +- name: assert test checksum matches after the single-quote line was inserted + assert: + that: + - "result.stat.checksum == '73b271c2cc1cef5663713bc0f00444b4bf9f4543'" ################################################################### diff --git a/test/integration/roles/test_lookups/tasks/main.yml b/test/integration/roles/test_lookups/tasks/main.yml index 6480b18b357..8440ff57720 100644 --- a/test/integration/roles/test_lookups/tasks/main.yml +++ b/test/integration/roles/test_lookups/tasks/main.yml @@ -81,7 +81,7 @@ - "wc_result.stdout == '9'" - "cat_result.stdout == newpass" -# ENV LOOKUP +# ENV LOOKUP - name: get first environment var name shell: env | head -n1 | cut -d\= -f1 @@ -92,16 +92,16 @@ register: known_var_value - name: use env lookup to get known var - set_fact: + set_fact: test_val: "{{ lookup('env', known_var_name.stdout) }}" - debug: var=known_var_name.stdout - debug: var=known_var_value.stdout -- debug: var=test_val +- debug: var=test_val - name: compare values assert: - that: + that: - "test_val == known_var_value.stdout" @@ -109,11 +109,23 @@ # https://github.com/ansible/ansible/issues/6550 - name: confirm pipe lookup works with a single positional arg - 
debug: msg="{{ lookup('pipe', 'ls') }}" - -# https://github.com/ansible/ansible/issues/6550 -- name: confirm pipe lookup works with multiple positional args - debug: msg="{{ lookup('pipe', 'ls -l /tmp') }}" + debug: msg="{{ lookup('pipe', 'ls') }}" +# LOOKUP TEMPLATING + +- name: use bare interpolation + debug: msg="got {{item}}" + with_items: things1 + register: bare_var + +- name: verify that list was interpolated + assert: + that: + - "bare_var.results[0].item == 1" + - "bare_var.results[1].item == 2" + +- name: use list with undefined var in it + debug: msg={{item}} + with_items: things2 diff --git a/test/integration/roles/test_mysql_db/tasks/state_dump_import.yml b/test/integration/roles/test_mysql_db/tasks/state_dump_import.yml index 1980b40638e..44267e1edb5 100644 --- a/test/integration/roles/test_mysql_db/tasks/state_dump_import.yml +++ b/test/integration/roles/test_mysql_db/tasks/state_dump_import.yml @@ -41,7 +41,7 @@ - "result.changed == true" - "result.db =='{{ db_name }}'" -- name: assert database was backup succesfully +- name: assert database was backup successfully command: file {{ db_file_name }} register: result diff --git a/test/integration/roles/test_mysql_user/tasks/main.yml b/test/integration/roles/test_mysql_user/tasks/main.yml index 7ad42d471b2..68042e74913 100644 --- a/test/integration/roles/test_mysql_user/tasks/main.yml +++ b/test/integration/roles/test_mysql_user/tasks/main.yml @@ -118,6 +118,34 @@ - include: remove_user.yml user_name={{user_name_2}} user_password={{ user_password_1 }} +- name: give user access to database via wildcard + mysql_user: name={{ user_name_1 }} priv=%db.*:SELECT append_privs=yes password={{ user_password_1 }} + +- name: show grants access for user1 on multiple database + command: mysql "-e SHOW GRANTS FOR '{{ user_name_1 }}'@'localhost';" + register: result + +- name: assert grant access for user1 on multiple database + assert: + that: + - "'%db' in result.stdout" + - "'SELECT' in result.stdout" + +- name: change user access to database via wildcard + mysql_user: name={{ user_name_1 }} priv=%db.*:INSERT append_privs=yes password={{ user_password_1 }} + +- name: show grants access for user1 on multiple database + command: mysql "-e SHOW GRANTS FOR '{{ user_name_1 }}'@'localhost';" + register: result + +- name: assert grant access for user1 on multiple database + assert: + that: + - "'%db' in result.stdout" + - "'INSERT' in result.stdout" + +- include: remove_user.yml user_name={{user_name_1}} user_password={{ user_password_1 }} + # ============================================================ # Update user password for a user. # Assert the user password is updated and old password can no longer be used. 
@@ -125,22 +153,22 @@ - include: user_password_update_test.yml # ============================================================ -# Assert create user with SELECT privileges, attemp to create database and update privileges to create database +# Assert create user with SELECT privileges, attempt to create database and update privileges to create database # - include: test_privs.yml current_privilege=SELECT current_append_privs=no # ============================================================ -# Assert creating user with SELECT privileges, attemp to create database and append privileges to create database +# Assert creating user with SELECT privileges, attempt to create database and append privileges to create database # - include: test_privs.yml current_privilege=DROP current_append_privs=yes # ============================================================ -# Assert create user with SELECT privileges, attemp to create database and update privileges to create database +# Assert create user with SELECT privileges, attempt to create database and update privileges to create database # - include: test_privs.yml current_privilege='UPDATE,ALTER' current_append_privs=no # ============================================================ -# Assert creating user with SELECT privileges, attemp to create database and append privileges to create database +# Assert creating user with SELECT privileges, attempt to create database and append privileges to create database # - include: test_privs.yml current_privilege='INSERT,DELETE' current_append_privs=yes diff --git a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml index d342d0a4ac7..8dcc414fde1 100644 --- a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml +++ b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml @@ -57,11 +57,12 @@ ignore_errors: true register: result +- debug: var=result.msg - name: assert output message that database not create with old password assert: that: - "result.failed == true" - - "'check login_user and login_password are correct' in result.msg" + - "'check login credentials (login_user, and login_password' in result.msg" - name: create database using user2 and new password mysql_db: name={{ db_name }} state=present login_user={{ user_name_2 }} login_password={{ user_password_1 }} diff --git a/test/integration/roles/test_postgresql/defaults/main.yml b/test/integration/roles/test_postgresql/defaults/main.yml new file mode 100644 index 00000000000..cfc50737c63 --- /dev/null +++ b/test/integration/roles/test_postgresql/defaults/main.yml @@ -0,0 +1,8 @@ +--- +# defaults file for test_postgresql_db +db_name: 'ansible_db' +db_user1: 'ansible_db_user1' +db_user2: 'ansible_db_user2' + +tmp_dir: '/tmp' + diff --git a/test/integration/roles/test_postgresql/meta/main.yml b/test/integration/roles/test_postgresql/meta/main.yml new file mode 100644 index 00000000000..85b1dc7e4cf --- /dev/null +++ b/test/integration/roles/test_postgresql/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_postgresql_db diff --git a/test/integration/roles/test_postgresql/tasks/main.yml b/test/integration/roles/test_postgresql/tasks/main.yml new file mode 100644 index 00000000000..e814b5fd9ee --- /dev/null +++ b/test/integration/roles/test_postgresql/tasks/main.yml @@ -0,0 +1,882 @@ +# +# Create and destroy db +# +- name: Create DB + sudo_user: postgres + sudo: True + postgresql_db: + state: present + name: "{{ 
db_name }}" + register: result + +- name: assert that module reports the db was created + assert: + that: + - "result.changed == true" + - "result.db =='{{ db_name }}'" + +- name: Check that database created + sudo_user: postgres + sudo: True + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + +- name: Run create on an already created db + sudo_user: postgres + sudo: True + postgresql_db: + state: present + name: "{{ db_name }}" + register: result + +- name: assert that module reports the db was unchanged + assert: + that: + - "result.changed == false" + +- name: Destroy DB + sudo_user: postgres + sudo: True + postgresql_db: + state: absent + name: "{{ db_name }}" + register: result + +- name: assert that module reports the db was changed + assert: + that: + - "result.changed == true" + +- name: Check that database was destroyed + sudo_user: postgres + sudo: True + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +- name: Destroy DB + sudo_user: postgres + sudo: True + postgresql_db: + state: absent + name: "{{ db_name }}" + register: result + +- name: assert that removing an alreaady removed db makes no change + assert: + that: + - "result.changed == false" + + +# This corner case works to add but not to drop. This is sufficiently crazy +# that I'm not going to attempt to fix it unless someone lets me know that they +# need the functionality +# +# - postgresql_db: +# state: 'present' +# name: '"silly.""name"' +# - shell: echo "select datname from pg_database where datname = 'silly.""name';" | psql +# register: result +# +# - assert: +# that: "result.stdout_lines[-1] == '(1 row)'" +# - postgresql_db: +# state: absent +# name: '"silly.""name"' +# - shell: echo "select datname from pg_database where datname = 'silly.""name';" | psql +# register: result +# +# - assert: +# that: "result.stdout_lines[-1] == '(0 rows)'" + +# +# Test encoding, collate, ctype, template options +# +- name: Create a DB with encoding, collate, ctype, and template options + sudo_user: postgres + sudo: True + postgresql_db: + name: '{{ db_name }}' + state: 'present' + encoding: 'LATIN1' + lc_collate: 'pt_BR' + lc_ctype: 'es_MX' + template: 'template0' + +- name: Check that the DB has all of our options + sudo_user: postgres + sudo: True + shell: echo "select datname, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'LATIN1' in result.stdout_lines[-2]" + - "'pt_BR' in result.stdout_lines[-2]" + - "'es_MX' in result.stdout_lines[-2]" + - "'UTF8' not in result.stdout_lines[-2]" + - "'en_US' not in result.stdout_lines[-2]" + +- name: Check that running db cration with options a second time does nothing + sudo_user: postgres + sudo: True + postgresql_db: + name: '{{ db_name }}' + state: 'present' + encoding: 'LATIN1' + lc_collate: 'pt_BR' + lc_ctype: 'es_MX' + template: 'template0' + register: result + +- assert: + that: + - 'result.changed == False' + + +- name: Check that attempting to change encoding returns an error + sudo_user: postgres + sudo: True + postgresql_db: + name: '{{ db_name }}' + state: 'present' + encoding: 'UTF8' + lc_collate: 'pt_BR' + lc_ctype: 'es_MX' + template: 'template0' + register: result + ignore_errors: True + 
+- assert: + that: + - 'result.failed == True' + +- name: Cleanup test DB + sudo_user: postgres + sudo: True + postgresql_db: + name: '{{ db_name }}' + state: 'absent' + +- shell: echo "select datname, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name }}';" | psql + sudo_user: postgres + sudo: True + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +# +# Create and destroy user +# +- name: Create a user + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + register: result + +- name: Check that ansible reports they were created + assert: + that: + - "result.changed == True" + +- name: Check that they were created + sudo_user: postgres + sudo: True + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + +- name: Check that creating user a second time does nothing + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + register: result + +- name: Check that ansible reports no change + assert: + that: + - "result.changed == False" + +- name: Remove user + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: 'absent' + register: result + +- name: Check that ansible reports they were removed + assert: + that: + - "result.changed == True" + +- name: Check that they were removed + sudo_user: postgres + sudo: True + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +- name: Check that removing user a second time does nothing + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: 'absent' + register: result + +- name: Check that ansible reports no change + assert: + that: + - "result.changed == False" + +- name: Create a user with all role attributes + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: "present" + role_attr_flags: "SUPERUSER,CREATEROLE,CREATEDB,INHERIT,login" + +- name: Check that the user has the requested role attributes + sudo_user: postgres + sudo: True + shell: echo "select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'super:t' in result.stdout_lines[-2]" + - "'createrole:t' in result.stdout_lines[-2]" + - "'create:t' in result.stdout_lines[-2]" + - "'inherit:t' in result.stdout_lines[-2]" + - "'login:t' in result.stdout_lines[-2]" + +- name: Modify a user to have no role attributes + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: "present" + role_attr_flags: "NOSUPERUSER,NOCREATEROLE,NOCREATEDB,noinherit,NOLOGIN" + register: result + +- name: Check that ansible reports it modified the role + assert: + that: + - "result.changed == True" + +- name: Check that the user has the requested role attributes + sudo_user: postgres + sudo: True + shell: echo "select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';" | psql + register: result + 
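+# Editor's note: the query above concatenates each pg_roles boolean column (rolsuper, rolcreaterole, rolcreatedb, rolinherit, rolcanlogin) into a label:value pair, and psql renders booleans as 't' or 'f', so the NOSUPERUSER/NOCREATEROLE/NOCREATEDB/noinherit/NOLOGIN flags set above should make every pair checked below read ':f'. 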
+- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'super:f' in result.stdout_lines[-2]" + - "'createrole:f' in result.stdout_lines[-2]" + - "'create:f' in result.stdout_lines[-2]" + - "'inherit:f' in result.stdout_lines[-2]" + - "'login:f' in result.stdout_lines[-2]" + +- name: Modify a single role attribute on a user + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: "present" + role_attr_flags: "LOGIN" + register: result + +- name: Check that ansible reports it modified the role + assert: + that: + - "result.changed == True" + +- name: Check that the user has the requested role attributes + sudo_user: postgres + sudo: True + shell: echo "select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'super:f' in result.stdout_lines[-2]" + - "'createrole:f' in result.stdout_lines[-2]" + - "'create:f' in result.stdout_lines[-2]" + - "'inherit:f' in result.stdout_lines[-2]" + - "'login:t' in result.stdout_lines[-2]" + +- name: Cleanup the user + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: 'absent' + +- name: Check that they were removed + sudo_user: postgres + sudo: True + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +### TODO: test expires, fail_on_user + +# +# Test db ownership +# +- name: Create an unprivileged user to own a DB + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + +- name: Create db with user ownership + sudo_user: postgres + sudo: True + postgresql_db: + name: "{{ db_name }}" + state: "present" + owner: "{{ db_user1 }}" + +- name: Check that the user owns the newly created DB + sudo_user: postgres + sudo: True + shell: echo "select pg_catalog.pg_get_userbyid(datdba) from pg_catalog.pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user1 }}' == '{{ result.stdout_lines[-2] | trim }}'" + +- name: Change the owner on an existing db + sudo_user: postgres + sudo: True + postgresql_db: + name: "{{ db_name }}" + state: "present" + owner: "postgres" + register: result + +- name: assert that ansible says it changed the db + assert: + that: + - "result.changed == True" + +- name: Check that the user owns the newly created DB + sudo_user: postgres + sudo: True + shell: echo "select pg_catalog.pg_get_userbyid(datdba) from pg_catalog.pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'postgres' == '{{ result.stdout_lines[-2] | trim }}'" + +- name: Cleanup db + sudo_user: postgres + sudo: True + postgresql_db: + name: "{{ db_name }}" + state: "absent" + +- name: Check that database was destroyed + sudo_user: postgres + sudo: True + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +- name: Cleanup test user + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: 'absent' + +- name: Check that they were removed + sudo_user: postgres + sudo: True 
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +# +# Test settings privleges +# +- name: Create db + sudo_user: postgres + sudo: True + postgresql_db: + name: "{{ db_name }}" + state: "present" + +- name: Create some tables on the db + sudo_user: postgres + sudo: True + shell: echo "create table test_table1 (field text);" | psql {{ db_name }} + +- sudo_user: postgres + sudo: True + shell: echo "create table test_table2 (field text);" | psql {{ db_name }} + +- name: Create a user with some permissions on the db + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + db: "{{ db_name }}" + priv: 'test_table1:INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER/test_table2:INSERT/CREATE,CONNECT,TEMP' + +- name: Check that the user has the requested permissions (table1) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + register: result_table1 + +- name: Check that the user has the requested permissions (table2) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- name: Check that the user has the requested permissions (database) + sudo_user: postgres + sudo: True + shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + register: result_database + +- assert: + that: + - "result_table1.stdout_lines[-1] == '(7 rows)'" + - "'INSERT' in result_table1.stdout" + - "'SELECT' in result_table1.stdout" + - "'UPDATE' in result_table1.stdout" + - "'DELETE' in result_table1.stdout" + - "'TRUNCATE' in result_table1.stdout" + - "'REFERENCES' in result_table1.stdout" + - "'TRIGGER' in result_table1.stdout" + - "result_table2.stdout_lines[-1] == '(1 row)'" + - "'INSERT' == '{{ result_table2.stdout_lines[-2] | trim }}'" + - "result_database.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user1 }}=CTc/postgres' in result_database.stdout_lines[-2]" + +- name: Add another permission for the user + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + db: "{{ db_name }}" + priv: 'test_table2:select' + register: results + +- name: Check that ansible reports it changed the user + assert: + that: + - "results.changed == True" + +- name: Check that the user has the requested permissions (table2) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- assert: + that: + - "result_table2.stdout_lines[-1] == '(2 rows)'" + - "'INSERT' in result_table2.stdout" + - "'SELECT' in result_table2.stdout" + + +# +# Test priv setting via postgresql_privs module +# (Depends on state from previous _user privs tests) +# + +- name: Revoke a privilege + sudo_user: postgres + sudo: True + postgresql_privs: + type: "table" + state: "absent" + roles: "{{ db_user1 }}" + privs: "INSERT" + objs: "test_table2" + db: "{{ db_name }}" + register: results + +- name: Check that ansible reports it changed the user + assert: + 
that: + - "results.changed == True" + +- name: Check that the user has the requested permissions (table2) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- assert: + that: + - "result_table2.stdout_lines[-1] == '(1 row)'" + - "'SELECT' == '{{ result_table2.stdout_lines[-2] | trim }}'" + +- name: Revoke many privileges on multiple tables + sudo_user: postgres + sudo: True + postgresql_privs: + state: "absent" + roles: "{{ db_user1 }}" + privs: "INSERT,select,UPDATE,TRUNCATE,REFERENCES,TRIGGER,delete" + objs: "test_table2,test_table1" + db: "{{ db_name }}" + register: results + +- name: Check that ansible reports it changed the user + assert: + that: + - "results.changed == True" + +- name: Check that permissions were revoked (table1) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + register: result_table1 + +- name: Check that permissions were revoked (table2) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- assert: + that: + - "result_table1.stdout_lines[-1] == '(0 rows)'" + - "result_table2.stdout_lines[-1] == '(0 rows)'" + +- name: Revoke database privileges + sudo_user: postgres + sudo: True + postgresql_privs: + type: "database" + state: "absent" + roles: "{{ db_user1 }}" + privs: "Create,connect,TEMP" + objs: "{{ db_name }}" + db: "{{ db_name }}" + +- name: Check that the user has the requested permissions (database) + sudo_user: postgres + sudo: True + shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + register: result_database + +- assert: + that: + - "result_database.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user1 }}' not in result_database.stdout" + +- name: Grant database privileges + sudo_user: postgres + sudo: True + postgresql_privs: + type: "database" + state: "present" + roles: "{{ db_user1 }}" + privs: "CREATE,connect" + objs: "{{ db_name }}" + db: "{{ db_name }}" + register: results + +- name: Check that ansible reports it changed the user + assert: + that: + - "results.changed == True" + +- name: Check that the user has the requested permissions (database) + sudo_user: postgres + sudo: True + shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + register: result_database + +- assert: + that: + - "result_database.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user1 }}=Cc' in result_database.stdout" + +- name: Grant a single privilege on a table + sudo_user: postgres + sudo: True + postgresql_privs: + state: "present" + roles: "{{ db_user1 }}" + privs: "INSERT" + objs: "test_table1" + db: "{{ db_name }}" + +- name: Check that permissions were added (table1) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + register: result_table1 + +- assert: + that: + - "result_table1.stdout_lines[-1] == '(1 row)'" + - "'{{ result_table1.stdout_lines[-2] | trim }}' == 'INSERT'" + +- name: Grant many privileges on multiple tables + sudo_user: postgres + sudo: 
True + postgresql_privs: + state: "present" + roles: "{{ db_user1 }}" + privs: 'INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,trigger' + objs: "test_table2,test_table1" + db: "{{ db_name }}" + +- name: Check that permissions were added (table1) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + register: result_table1 + +- name: Check that permissions were added (table2) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- assert: + that: + - "result_table1.stdout_lines[-1] == '(7 rows)'" + - "'INSERT' in result_table1.stdout" + - "'SELECT' in result_table1.stdout" + - "'UPDATE' in result_table1.stdout" + - "'DELETE' in result_table1.stdout" + - "'TRUNCATE' in result_table1.stdout" + - "'REFERENCES' in result_table1.stdout" + - "'TRIGGER' in result_table1.stdout" + - "result_table2.stdout_lines[-1] == '(7 rows)'" + - "'INSERT' in result_table2.stdout" + - "'SELECT' in result_table2.stdout" + - "'UPDATE' in result_table2.stdout" + - "'DELETE' in result_table2.stdout" + - "'TRUNCATE' in result_table2.stdout" + - "'REFERENCES' in result_table2.stdout" + - "'TRIGGER' in result_table2.stdout" + +# +# Cleanup +# +- name: Cleanup db + sudo_user: postgres + sudo: True + postgresql_db: + name: "{{ db_name }}" + state: "absent" + +- name: Check that database was destroyed + sudo_user: postgres + sudo: True + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +- name: Cleanup test user + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: 'absent' + +- name: Check that they were removed + sudo_user: postgres + sudo: True + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +# +# Test login_user functionality +# +- name: Create a user to test login module parameters + sudo: True + sudo_user: postgres + postgresql_user: + name: "{{ db_user1 }}" + state: "present" + encrypted: 'no' + password: "password" + role_attr_flags: "CREATEDB,LOGIN,CREATEROLE" + +- name: Create db + postgresql_db: + name: "{{ db_name }}" + state: "present" + login_user: "{{ db_user1 }}" + login_password: "password" + login_host: "localhost" + +- name: Check that database created + sudo: True + sudo_user: postgres + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + +- name: Create a user + postgresql_user: + name: "{{ db_user2 }}" + state: "present" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + db: "{{ db_name }}" + login_user: "{{ db_user1 }}" + login_password: "password" + login_host: "localhost" + +- name: Check that they were created + sudo: True + sudo_user: postgres + shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + +- name: Grant database privileges + postgresql_privs: + type: "database" + state: "present" + roles: "{{ db_user2 }}" + privs: "CREATE,connect" + objs: "{{ db_name }}" + db: "{{ 
db_name }}" + login: "{{ db_user1 }}" + password: "password" + host: "localhost" + +- name: Check that the user has the requested permissions (database) + sudo: True + sudo_user: postgres + shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + register: result_database + +- assert: + that: + - "result_database.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user2 }}=Cc' in result_database.stdout" + +- name: Remove user + postgresql_user: + name: "{{ db_user2 }}" + state: 'absent' + priv: "ALL" + db: "{{ db_name }}" + login_user: "{{ db_user1 }}" + login_password: "password" + login_host: "localhost" + +- name: Check that they were removed + sudo: True + sudo_user: postgres + shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +- name: Destroy DB + postgresql_db: + state: absent + name: "{{ db_name }}" + login_user: "{{ db_user1 }}" + login_password: "password" + login_host: "localhost" + +- name: Check that database was destroyed + sudo: True + sudo_user: postgres + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +# +# Cleanup +# +- name: Cleanup test user + sudo: True + sudo_user: postgres + postgresql_user: + name: "{{ db_user1 }}" + state: 'absent' + +- name: Check that they were removed + sudo: True + sudo_user: postgres + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + diff --git a/test/integration/roles/test_rax/meta/main.yml b/test/integration/roles/test_rax/meta/main.yml new file mode 100644 index 00000000000..a3f85b642e3 --- /dev/null +++ b/test/integration/roles/test_rax/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - prepare_rax_tests diff --git a/test/integration/roles/test_rax/tasks/main.yml b/test/integration/roles/test_rax/tasks/main.yml new file mode 100644 index 00000000000..e91c0a949fe --- /dev/null +++ b/test/integration/roles/test_rax/tasks/main.yml @@ -0,0 +1,839 @@ +# ============================================================ +- name: Test rax with no args + rax: + ignore_errors: true + register: rax + +- name: Validate results of rax with no args + assert: + that: + - rax|failed + - rax.msg == 'No credentials supplied!' 
+# ============================================================ + + + +# ============================================================ +- name: Test rax with credentials + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + ignore_errors: true + register: rax + +- name: Validate results of rax with only creds + assert: + that: + - rax|failed + - rax.msg.startswith('None is not a valid region') +# ============================================================ + + + +# ============================================================ +- name: Test rax with creds and region + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + ignore_errors: true + register: rax + +- name: Validate rax creds and region + assert: + that: + - rax|failed + - rax.msg == 'image is required for the "rax" module' +# ============================================================ + + + +# ============================================================ +- name: Test rax with creds, region and image + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + ignore_errors: true + register: rax + +- name: Validate rax with creds, region and image + assert: + that: + - rax|failed + - rax.msg == 'flavor is required for the "rax" module' +# ============================================================ + + + +# ============================================================ +- name: Test rax with creds, region, image and flavor + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + ignore_errors: true + register: rax + +- name: Validate rax with creds, region, image and flavor + assert: + that: + - rax|failed + - rax.msg == 'name is required for the "rax" module' +# ============================================================ + + + +# ============================================================ +- name: Test rax with creds, region, image, flavor and name + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-1" + register: rax + +- name: Validate rax with creds, region, image, flavor and name + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 1 + - rax.instances[0].name == "{{ resource_prefix }}-1" + - rax.instances[0] == rax.success[0] + - rax.instances[0].rax_status == 'BUILD' + +- name: "Delete integration 1" + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-1" + state: absent + wait: true + register: rax + +- name: "Validate delete integration 1" + assert: + that: + - rax|changed + - rax.action == 'delete' + - rax.success[0].name == "{{ resource_prefix }}-1" +# ============================================================ + + + +# ============================================================ +- name: Test rax basic idempotency 1 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ 
resource_prefix }}-2" + wait: true + register: rax + +- name: Validate rax basic idepmpotency 1 + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 1 + - rax.instances[0].name == "{{ resource_prefix }}-2" + - rax.instances[0] == rax.success[0] + - rax.instances[0].rax_status == 'ACTIVE' + +- name: Test rax basic idempotency 2 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-2" + wait: true + register: rax + +- name: Validate rax basic idempotency 2 + assert: + that: + - rax|success + - not rax|changed + - not rax.action + - rax.instances|length == 1 + - rax.instances[0].name == "{{ resource_prefix }}-2" + - not rax.success + +- name: "Delete integration 2" + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-2" + state: absent + wait: true + register: rax + +- name: "Validate delete integration 2" + assert: + that: + - rax|success + - rax|changed + - rax.action == 'delete' + - rax.success[0].name == "{{ resource_prefix }}-2" + - rax.success[0].rax_status == "DELETED" +# ============================================================ + + + +# ============================================================ +- name: Test rax basic idempotency with meta 1 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-3" + meta: + foo: bar + wait: true + register: rax + +- name: Validate rax basic idepmpotency with meta 1 + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 1 + - rax.instances[0].name == "{{ resource_prefix }}-3" + - rax.instances[0] == rax.success[0] + - rax.instances[0].rax_status == 'ACTIVE' + - rax.instances[0].rax_metadata.foo == 'bar' + +- name: Test rax basic idempotency with meta 2 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-3" + meta: + foo: bar + wait: true + register: rax + +- name: Validate rax basic idempotency with meta 2 + assert: + that: + - rax|success + - not rax|changed + - not rax.action + - rax.instances|length == 1 + - rax.instances[0].name == "{{ resource_prefix }}-3" + - not rax.success + +- name: "Delete integration 3" + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-3" + state: absent + meta: + foo: bar + wait: true + register: rax + +- name: "Validate delete integration 3" + assert: + that: + - rax|success + - rax|changed + - rax.action == 'delete' + - rax.success[0].name == "{{ resource_prefix }}-3" + - rax.success[0].rax_status == "DELETED" +# ============================================================ + + + +# ============================================================ +- name: Test rax basic idempotency multi server 1 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ 
rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-4" + count: 2 + wait: true + register: rax + +- name: Validate rax basic idepmpotency multi server 1 + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 2 + - rax.instances == rax.success + +- name: Test rax basic idempotency multi server 2 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-4" + count: 2 + wait: true + register: rax + +- name: Validate rax basic idempotency multi server 2 + assert: + that: + - rax|success + - not rax|changed + - not rax.action + - rax.instances|length == 2 + - not rax.success + +- name: Test rax basic idempotency multi server 3 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-4" + count: 3 + wait: true + register: rax + +- name: Validate rax basic idempotency multi server 3 + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 3 + - rax.success|length == 1 + +- name: "Delete integration 4" + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-4" + count: 3 + state: absent + wait: true + register: rax + +- name: "Validate delete integration 4" + assert: + that: + - rax|success + - rax|changed + - rax.action == 'delete' + - rax.success|length == 3 + - not rax.instances +# ============================================================ + + + +# ============================================================ +- name: Test rax multi server group without exact_count 1 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-5-%02d" + count: 2 + group: "{{ resource_prefix }}-5" + wait: true + register: rax + +- name: Validate rax multi server group without exact_count 1 + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 2 + - rax.instances == rax.success + - rax.instances|map(attribute='rax_name')|unique|length == 2 + +- name: "Test delete integration 5" + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-5-%02d" + count: 2 + group: "{{ resource_prefix }}-5" + wait: true + state: absent + register: rax + +- name: "Validate delete integration 5" + assert: + that: + - rax|success + - rax|changed + - rax.action == 'delete' + - rax.success|length == 2 + - not rax.instances +# ============================================================ + + + +# ============================================================ +- name: Test rax multi server group without exact_count non-idempotency 1 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ 
resource_prefix }}-6-%02d" + count: 2 + group: "{{ resource_prefix }}-6" + wait: true + register: rax + +- name: Validate rax multi server group without exact_count non-idempotency 1 + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 2 + - rax.instances == rax.success + - rax.instances|map(attribute='rax_name')|unique|length == 2 + +- name: Test rax multi server group without exact_count non-idempotency 2 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-6-%02d" + count: 2 + group: "{{ resource_prefix }}-6" + wait: true + register: rax + +- name: Validate rax multi server group without exact_count non-idempotency 2 + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 4 + - rax.instances|map(attribute='rax_name')|unique|length == 4 + +- name: "Test delete integration 6" + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-6-%02d" + count: 4 + group: "{{ resource_prefix }}-6" + wait: true + state: absent + register: rax + +- name: "Validate delete integration 6" + assert: + that: + - rax|success + - rax|changed + - rax.action == 'delete' + - rax.success|length == 4 + - not rax.instances +# ============================================================ + + + +# ============================================================ +- name: Test rax multi server group with exact_count 1 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-7-%02d" + count: 2 + exact_count: true + group: "{{ resource_prefix }}-7" + wait: true + register: rax + +- name: Validate rax multi server group with exact_count 1 + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 2 + - rax.instances == rax.success + - rax.instances|map(attribute='rax_name')|unique|length == 2 + +- name: Test rax multi server group with exact_count 2 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-7-%02d" + count: 2 + exact_count: true + group: "{{ resource_prefix }}-7" + wait: true + register: rax + +- name: Validate rax multi server group with exact_count 2 + assert: + that: + - rax|success + - not rax|changed + - not rax.action + - rax.instances|length == 2 + - rax.instances|map(attribute='rax_name')|unique|length == 2 + +- name: Test rax multi server group with exact_count 3 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-7-%02d" + count: 4 + exact_count: true + group: "{{ resource_prefix }}-7" + wait: true + register: rax + +- name: Validate rax multi server group with exact_count 3 + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 4 + - rax.success|length == 2 + - 
rax.instances|map(attribute='rax_name')|unique|length == 4 + + +- name: "Test delete integration 7" + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-7-%02d" + count: 0 + exact_count: true + group: "{{ resource_prefix }}-7" + wait: true + register: rax + +- name: "Validate delete integration 7" + assert: + that: + - rax|success + - rax|changed + - rax.action == 'delete' + - rax.success|length == 4 + - not rax.instances +# ============================================================ + + + +# ============================================================ +- name: Test rax multi server group without exact_count and disabled auto_increment 1 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-8" + count: 2 + group: "{{ resource_prefix }}-8" + auto_increment: false + wait: true + register: rax + +- name: Validate rax multi server group without exact_count and disabled auto_increment 1 + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 2 + - rax.instances == rax.success + - rax.instances|map(attribute='rax_name')|unique|length == 1 + +- name: "Test delete integration 8" + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-8" + count: 2 + group: "{{ resource_prefix }}-8" + auto_increment: false + wait: true + state: absent + register: rax + +- name: "Validate delete integration 8" + assert: + that: + - rax|success + - rax|changed + - rax.action == 'delete' + - rax.success|length == 2 + - not rax.instances +# ============================================================ + + + +# ============================================================ +- name: Test rax multi server group with exact_count and no printf 1 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-9" + count: 2 + exact_count: true + group: "{{ resource_prefix }}-9" + wait: true + register: rax + +- name: Validate rax multi server group with exact_count and no printf 1 + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 2 + - rax.instances == rax.success + - rax.instances|map(attribute='rax_name')|unique|list|sort == ['{{ resource_prefix }}-91', '{{ resource_prefix }}-92'] + +- name: "Test delete integration 9" + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-9" + count: 0 + exact_count: true + group: "{{ resource_prefix }}-9" + wait: true + register: rax + +- name: "Validate delete integration 9" + assert: + that: + - rax|success + - rax|changed + - rax.action == 'delete' + - rax.success|length == 2 + - not rax.instances +# ============================================================ + + + +# ============================================================ +- name: Test rax multi server group with exact_count 
and offset 1 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-10-%03d" + count: 2 + count_offset: 10 + exact_count: true + group: "{{ resource_prefix }}-10" + wait: true + register: rax + +- name: Validate rax multi server group with exact_count and offset 1 + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 2 + - rax.instances == rax.success + - rax.instances|map(attribute='rax_name')|unique|list|sort == ['{{ resource_prefix }}-10-010', '{{ resource_prefix }}-10-011'] + +- name: "Test delete integration 10" + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-10-%03d" + count: 0 + count_offset: 10 + exact_count: true + group: "{{ resource_prefix }}-10" + wait: true + register: rax + +- name: "Validate delete integration 10" + assert: + that: + - rax|success + - rax|changed + - rax.action == 'delete' + - rax.success|length == 2 + - not rax.instances +# ============================================================ + + + +# ============================================================ +- name: Test rax multi server group with exact_count and offset 2 + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-11-%03d" + count: 2 + count_offset: 10 + exact_count: true + group: "{{ resource_prefix }}-11" + wait: true + register: rax + +- name: Validate rax multi server group with exact_count and offset 2 + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 2 + - rax.instances == rax.success + - rax.instances|map(attribute='rax_name')|unique|list|sort == ['{{ resource_prefix }}-11-010', '{{ resource_prefix }}-11-011'] + +- name: "Test delete integration 11" + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-11-%03d" + count: 0 + count_offset: 10 + exact_count: true + group: "{{ resource_prefix }}-11" + wait: true + register: rax + +- name: "Validate delete integration 11" + assert: + that: + - rax|success + - rax|changed + - rax.action == 'delete' + - rax.success|length == 2 + - not rax.instances +# ============================================================ + + + +# ============================================================ +- name: Test rax instance_ids absent 1 (create) + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-12" + wait: true + register: rax + +- name: Validate rax instance_ids absent 1 (create) + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 1 + - rax.instances[0].name == "{{ resource_prefix }}-12" + - rax.instances[0] == rax.success[0] + - rax.instances[0].rax_status == 'ACTIVE' + +- name: Test rax instance_ids absent 2 (delete) + rax: + username: "{{ rackspace_username }}" + api_key: "{{ 
rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + instance_ids: + - "{{ rax.success.0.rax_id }}" + state: absent + wait: true + register: rax2 + +- name: Validate rax instance_ids absent 2 (delete) + assert: + that: + - rax2|success + - rax2|changed + - rax2.action == 'delete' + - rax2.success.0.rax_id == rax.success.0.rax_id +# ============================================================ diff --git a/test/integration/roles/test_rax_cbs/meta/main.yml b/test/integration/roles/test_rax_cbs/meta/main.yml new file mode 100644 index 00000000000..a3f85b642e3 --- /dev/null +++ b/test/integration/roles/test_rax_cbs/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - prepare_rax_tests diff --git a/test/integration/roles/test_rax_cbs/tasks/main.yml b/test/integration/roles/test_rax_cbs/tasks/main.yml new file mode 100644 index 00000000000..90395e4fe20 --- /dev/null +++ b/test/integration/roles/test_rax_cbs/tasks/main.yml @@ -0,0 +1,346 @@ +# ============================================================ +- name: Test rax_cbs with no args + rax_cbs: + ignore_errors: true + register: rax_cbs + +- name: Validate results of rax_cbs with no args + assert: + that: + - rax_cbs|failed + - rax_cbs.msg == 'missing required arguments: name' +# ============================================================ + + + +# ============================================================ +- name: Test rax_cbs with name + rax_cbs: + name: fail + ignore_errors: true + register: rax_cbs + +- name: Validate results of rax_cbs with name + assert: + that: + - rax_cbs|failed + - rax_cbs.msg == 'No credentials supplied!' +# ============================================================ + + + +# ============================================================ +- name: Test rax_cbs with name and credentials + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + name: fail + ignore_errors: true + register: rax_cbs + +- name: Validate results of rax_cbs with name and credentials + assert: + that: + - rax_cbs|failed + - rax_cbs.msg.startswith('None is not a valid region') +# ============================================================ + + + +# ============================================================ +- name: Test rax_cbs with creds, region and name + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-1" + wait: true + register: rax_cbs + +- name: Validate rax_cbs creds, region and name + assert: + that: + - rax_cbs|success + - rax_cbs|changed + - rax_cbs.volume.display_name == "{{ resource_prefix }}-1" + - rax_cbs.volume.attachments == [] + - rax_cbs.volume.size == 100 + - rax_cbs.volume.volume_type == 'SATA' + - rax_cbs.volume.status == 'available' + +- name: Delete integration 1 + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-1" + state: absent + register: rax_cbs + +- name: Validate delete integration 1 + assert: + that: + - rax_cbs|success + - rax_cbs|changed + - rax_cbs.volume.display_name == "{{ resource_prefix }}-1" +# ============================================================ + + + + +# ============================================================ +- name: Test rax_cbs with creds, region, name and invalid size + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ 
rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: fail + size: 1 + ignore_errors: true + register: rax_cbs + +- name: Validate rax_cbs creds, region, name and invalid size + assert: + that: + - rax_cbs|failed + - rax_cbs.msg == '"size" must be greater than or equal to 100' +# ============================================================ + + + +# ============================================================ +- name: Test rax_cbs with creds, region, name and valid size + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-2" + size: 150 + wait: true + register: rax_cbs + +- name: Validate rax_cbs creds, region and valid size + assert: + that: + - rax_cbs|success + - rax_cbs|changed + - rax_cbs.volume.display_name == "{{ resource_prefix }}-2" + - rax_cbs.volume.attachments == [] + - rax_cbs.volume.size == 150 + - rax_cbs.volume.volume_type == 'SATA' + - rax_cbs.volume.status == 'available' + +- name: Delete integration 2 + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-2" + state: absent + register: rax_cbs + +- name: Validate delete integration 2 + assert: + that: + - rax_cbs|success + - rax_cbs|changed + - rax_cbs.volume.display_name == "{{ resource_prefix }}-2" +# ============================================================ + + + +# ============================================================ +- name: Test rax_cbs with creds, region, name and invalid volume_type + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: fail + volume_type: fail + ignore_errors: true + register: rax_cbs + +- name: Validate rax_cbs creds, region, name and invalid volume_type + assert: + that: + - rax_cbs|failed + - "rax_cbs.msg == 'value of volume_type must be one of: SSD,SATA, got: fail'" +# ============================================================ + + + +# ============================================================ +- name: Test rax_cbs with creds, region, name and valid volume_type + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-3" + volume_type: SSD + wait: true + register: rax_cbs + +- name: Validate rax_cbs creds, region and valid volume_type + assert: + that: + - rax_cbs|success + - rax_cbs|changed + - rax_cbs.volume.display_name == "{{ resource_prefix }}-3" + - rax_cbs.volume.attachments == [] + - rax_cbs.volume.size == 100 + - rax_cbs.volume.volume_type == 'SSD' + - rax_cbs.volume.status == 'available' + +- name: Delete integration 3 + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-3" + state: absent + register: rax_cbs + +- name: Validate delete integration 3 + assert: + that: + - rax_cbs|success + - rax_cbs|changed + - rax_cbs.volume.display_name == "{{ resource_prefix }}-3" +# ============================================================ + + + +# ============================================================ +- name: Test rax_cbs with creds, region, name and description + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-4" + description: "{{ resource_prefix }}-4 description" + wait: true + register: rax_cbs + 
+- name: Validate rax_cbs creds, region and description + assert: + that: + - rax_cbs|success + - rax_cbs|changed + - rax_cbs.volume.display_name == "{{ resource_prefix }}-4" + - rax_cbs.volume.description == '{{ resource_prefix }}-4 description' + - rax_cbs.volume.attachments == [] + - rax_cbs.volume.size == 100 + - rax_cbs.volume.volume_type == 'SATA' + - rax_cbs.volume.status == 'available' + +- name: Delete integration 4 + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-4" + state: absent + register: rax_cbs + +- name: Validate delete integration 4 + assert: + that: + - rax_cbs|success + - rax_cbs|changed + - rax_cbs.volume.display_name == "{{ resource_prefix }}-4" +# ============================================================ + + + +# ============================================================ +- name: Test rax_cbs with creds, region, name and meta + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-5" + meta: + foo: bar + wait: true + register: rax_cbs + +- name: Validate rax_cbs creds, region and meta + assert: + that: + - rax_cbs|success + - rax_cbs|changed + - rax_cbs.volume.display_name == "{{ resource_prefix }}-5" + - rax_cbs.volume.attachments == [] + - rax_cbs.volume.size == 100 + - rax_cbs.volume.volume_type == 'SATA' + - rax_cbs.volume.status == 'available' + - rax_cbs.volume.metadata.foo == 'bar' + +- name: Delete integration 5 + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-5" + state: absent + register: rax_cbs + +- name: Validate delete integration 5 + assert: + that: + - rax_cbs|success + - rax_cbs|changed + - rax_cbs.volume.display_name == "{{ resource_prefix }}-5" +# ============================================================ + + + +# ============================================================ +- name: Test rax_cbs with idempotency 1 + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-6" + wait: true + register: rax_cbs_1 + +- name: Validate rax_cbs with idempotency 1 + assert: + that: + - rax_cbs_1|success + - rax_cbs_1|changed + - rax_cbs_1.volume.display_name == "{{ resource_prefix }}-6" + +- name: Test rax_cbs with idempotency 2 + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-6" + register: rax_cbs_2 + +- name: Validate rax_cbs with idempotency 2 + assert: + that: + - rax_cbs_2|success + - not rax_cbs_2|changed + - rax_cbs_2.volume.display_name == "{{ resource_prefix }}-6" + - rax_cbs_2.volume.id == rax_cbs_1.volume.id + +- name: Delete integration 6 + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-6" + state: absent + register: rax_cbs + +- name: Validate delete integration 6 + assert: + that: + - rax_cbs|success + - rax_cbs|changed + - rax_cbs.volume.display_name == "{{ resource_prefix }}-6" +# ============================================================ diff --git a/test/integration/roles/test_rax_cbs_attachments/meta/main.yml b/test/integration/roles/test_rax_cbs_attachments/meta/main.yml new file mode 100644 index 00000000000..a3f85b642e3 --- /dev/null +++ 
b/test/integration/roles/test_rax_cbs_attachments/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - prepare_rax_tests diff --git a/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml b/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml new file mode 100644 index 00000000000..6750105c1e6 --- /dev/null +++ b/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml @@ -0,0 +1,253 @@ +# ============================================================ +- name: Test rax_cbs_attachments with no args + rax_cbs_attachments: + ignore_errors: true + register: rax_cbs_attachments + +- name: Validate results of rax_cbs_attachments with no args + assert: + that: + - rax_cbs_attachments|failed + - rax_cbs_attachments.msg == 'missing required arguments: server,volume,device' +# ============================================================ + + + +# ============================================================ +- name: Test rax_cbs_attachments with server, volume and device + rax_cbs_attachments: + server: '1234' + volume: '1234' + device: /dev/xvde + ignore_errors: true + register: rax_cbs_attachments + +- name: Validate results of rax_cbs_attachments with server, volume and device + assert: + that: + - rax_cbs_attachments|failed + - rax_cbs_attachments.msg == 'No credentials supplied!' +# ============================================================ + + + +# ============================================================ +- name: Test rax_cbs_attachments with credentials, server, volume and device + rax_cbs_attachments: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + server: '1234' + volume: '1234' + device: /dev/xvde + ignore_errors: true + register: rax_cbs_attachments + +- name: Validate results of rax_cbs_attachments with credentials, server, volume and device + assert: + that: + - rax_cbs_attachments|failed + - rax_cbs_attachments.msg.startswith('None is not a valid region') +# ============================================================ + + + +# ============================================================ +- name: Test rax_cbs_attachments with creds, region, invalid server, invalid volume and device + rax_cbs_attachments: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + server: '1234' + volume: '1234' + device: /dev/xvde + ignore_errors: true + register: rax_cbs_attachments + +- name: Validate rax_cbs_attachments creds, region, invalid server, invalid volume and device + assert: + that: + - rax_cbs_attachments|failed + - rax_cbs_attachments.msg == 'No matching storage volumes were found' +# ============================================================ + + + +# ============================================================ +- name: Build Volume for rax_cbs_attachments test + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-rax_cbs_attachments" + wait: true + register: rax_cbs + +- name: Validate volume build + assert: + that: + - rax_cbs|success + - rax_cbs|changed + - rax_cbs.volume.display_name == "{{ resource_prefix }}-rax_cbs_attachments" +# ============================================================ + + + +# ============================================================ +- name: Build CloudServer for rax_cbs_attachments tests + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ 
rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-rax_cbs_attachments" + wait: true + register: rax + +- name: Validate CloudServer build + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 1 + - rax.instances[0].name == "{{ resource_prefix }}-rax_cbs_attachments" +# ============================================================ + + + +# ============================================================ +- name: Test rax_cbs_attachments with creds, region, invalid server, volume and device + rax_cbs_attachments: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + server: '1234' + volume: "{{ rax_cbs.volume.id }}" + device: /dev/xvde + ignore_errors: true + register: rax_cbs_attachments + +- name: Validate rax_cbs_attachments creds, region, invalid server, volume and device + assert: + that: + - rax_cbs_attachments|failed + - rax_cbs_attachments.msg == 'No Server was matched by name, try using the Server ID instead' +# ============================================================ + + + +# ============================================================ +- name: Test rax_cbs_attachments with creds, region, server, volume and device (valid) + rax_cbs_attachments: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + server: "{{ rax.instances[0].id }}" + volume: "{{ rax_cbs.volume.id }}" + device: /dev/xvde + wait: true + register: rax_cbs_attachments + +- name: Validate rax_cbs_attachments creds, region, server, volume and device (valid) + assert: + that: + - rax_cbs_attachments|success + - rax_cbs_attachments|changed + - rax_cbs_attachments.volume.attachments.0.device == '/dev/xvde' + - rax_cbs_attachments.volume.attachments.0.server_id == "{{ rax.instances[0].id }}" + +- name: Idempotent present test + rax_cbs_attachments: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + server: "{{ rax.instances[0].id }}" + volume: "{{ rax_cbs.volume.id }}" + device: /dev/xvde + wait: true + register: rax_cbs_attachments + +- name: Validate idempotent present test + assert: + that: + - rax_cbs_attachments|success + - not rax_cbs_attachments|changed + +- name: Detach volume + rax_cbs_attachments: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + server: "{{ rax.instances[0].id }}" + volume: "{{ rax_cbs.volume.id }}" + device: /dev/xvde + wait: true + state: absent + register: rax_cbs_attachments + +- name: Validate detach volume + assert: + that: + - rax_cbs_attachments|success + - rax_cbs_attachments|changed + - rax_cbs_attachments.volume.attachments == [] + +- name: Idempotent absent test + rax_cbs_attachments: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + server: "{{ rax.instances[0].id }}" + volume: "{{ rax_cbs.volume.id }}" + device: /dev/xvde + wait: true + state: absent + register: rax_cbs_attachments + +- name: Validate idempotent absent test + assert: + that: + - rax_cbs_attachments|success + - not rax_cbs_attachments|changed +# ============================================================ + + + +# ============================================================ +- name: Delete test volume + rax_cbs: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + 
name: "{{ rax_cbs.volume.id }}" + state: absent + register: rax_cbs + +- name: Validate delete integration 6 + assert: + that: + - rax_cbs|success + - rax_cbs|changed +# ============================================================ + + + +# ============================================================ +- name: "Delete CloudServer" + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + instance_ids: "{{ rax.instances[0].id }}" + state: absent + wait: true + register: rax + +- name: "Validate delete" + assert: + that: + - rax|changed + - rax|success + - rax.action == 'delete' +# ============================================================ diff --git a/test/integration/roles/test_rax_cdb/meta/main.yml b/test/integration/roles/test_rax_cdb/meta/main.yml new file mode 100644 index 00000000000..a3f85b642e3 --- /dev/null +++ b/test/integration/roles/test_rax_cdb/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - prepare_rax_tests diff --git a/test/integration/roles/test_rax_cdb/tasks/main.yml b/test/integration/roles/test_rax_cdb/tasks/main.yml new file mode 100644 index 00000000000..fe4bdd9c0d9 --- /dev/null +++ b/test/integration/roles/test_rax_cdb/tasks/main.yml @@ -0,0 +1,270 @@ +# ============================================================ +- name: Test rax_cdb with no args + rax_cdb: + ignore_errors: true + register: rax_cdb + +- name: Validate results of rax_cdb with no args + assert: + that: + - rax_cdb|failed + - rax_cdb.msg == 'missing required arguments: name' +# ============================================================ + + + +# ============================================================ +- name: Test rax_cdb with name + rax_cdb: + name: fail + ignore_errors: true + register: rax_cdb + +- name: Validate results of rax_cdb with only creds + assert: + that: + - rax_cdb|failed + - rax_cdb.msg == 'No credentials supplied!' 
+# ============================================================ + + + +# ============================================================ +- name: Test rax_cdb with name and credentials + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + name: fail + ignore_errors: true + register: rax_cdb + +- name: Validate results of rax_cdb with name and credentials + assert: + that: + - rax_cdb|failed + - rax_cdb.msg.startswith('None is not a valid region') +# ============================================================ + + + +# ============================================================ +- name: Test rax_cdb with creds and region + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + ignore_errors: true + register: rax_cdb + +- name: Validate rax_cdb creds and region + assert: + that: + - rax_cdb|failed + - rax_cdb.msg == 'missing required arguments: name' +# ============================================================ + + + +# ============================================================ +- name: Test rax_cdb with creds, region and name + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-1" + wait: true + register: rax_cdb + +- name: Validate rax_cdb with creds, region and name + assert: + that: + - rax_cdb|success + - rax_cdb|changed + - rax_cdb.cdb.name == '{{ resource_prefix }}-1' + - rax_cdb.cdb.hostname + - rax_cdb.cdb.status == 'ACTIVE' + +- name: "Delete integration 1" + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-1" + state: absent + wait: true + register: rax_cdb + +- name: "Validate delete integration 1" + assert: + that: + - rax_cdb|success + - rax_cdb|changed + - rax_cdb.cdb.name == "{{ resource_prefix }}-1" + +# ============================================================ + + + +# ============================================================ +- name: Test rax_cdb idempotent test 1 + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-2" + wait: true + register: rax_cdb + +- name: Validate rax_cdb idempotent test 1 + assert: + that: + - rax_cdb|success + - rax_cdb|changed + - rax_cdb.cdb.name == "{{ resource_prefix }}-2" + - rax_cdb.cdb.status == 'ACTIVE' + +- name: Test rax_cdb idempotent test 2 + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-2" + wait: true + register: rax_cdb + +- name: Validate rax_cdb idempotent test 2 + assert: + that: + - rax_cdb|success + - not rax_cdb|changed + - rax_cdb.cdb.name == "{{ resource_prefix }}-2" + - rax_cdb.cdb.status == 'ACTIVE' + +- name: "Delete integration 2" + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-2" + state: absent + wait: true + register: rax_cdb + +- name: "Validate delete integration 2" + assert: + that: + - rax_cdb|changed + - rax_cdb.cdb.name == "{{ resource_prefix }}-2" +# ============================================================ + + + +# ============================================================ +- name: Test rax_cdb resize volume 1 + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: 
"{{ rackspace_region }}" + name: "{{ resource_prefix }}-3" + wait: true + register: rax_cdb + +- name: Validate rax_cdb resize volume 1 + assert: + that: + - rax_cdb|success + - rax_cdb|changed + - rax_cdb.cdb.name == "{{ resource_prefix }}-3" + - rax_cdb.cdb.status == 'ACTIVE' + +- name: Test rax_cdb resize volume 2 + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-3" + volume: 3 + wait: true + wait_timeout: 600 + register: rax_cdb + +- name: Validate rax_cdb resize volume 2 + assert: + that: + - rax_cdb|success + - rax_cdb|changed + - rax_cdb.cdb.name == "{{ resource_prefix }}-3" + - rax_cdb.cdb.status == 'ACTIVE' + +- name: "Delete integration 3" + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-3" + state: absent + wait: true + register: rax_cdb + +- name: "Validate delete integration 3" + assert: + that: + - rax_cdb|changed + - rax_cdb.cdb.name == "{{ resource_prefix }}-3" +# ============================================================ + + + +# ============================================================ +- name: Test rax_cdb resize flavor 1 + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-4" + wait: true + register: rax_cdb + +- name: Validate rax_cdb resize flavor 1 + assert: + that: + - rax_cdb|success + - rax_cdb|changed + - rax_cdb.cdb.name == "{{ resource_prefix }}-4" + - rax_cdb.cdb.status == 'ACTIVE' + +- name: Test rax_cdb resize flavor 2 + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-4" + flavor: 2 + wait: true + wait_timeout: 600 + register: rax_cdb + +- name: Validate rax_cdb resize flavor 2 + assert: + that: + - rax_cdb|success + - rax_cdb|changed + - rax_cdb.cdb.name == "{{ resource_prefix }}-4" + - rax_cdb.cdb.status == 'ACTIVE' + +- name: "Delete integration 4" + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-4" + state: absent + wait: true + register: rax_cdb + +- name: "Validate delete integration 4" + assert: + that: + - rax_cdb|changed + - rax_cdb.cdb.name == "{{ resource_prefix }}-4" +# ============================================================ diff --git a/test/integration/roles/test_rax_cdb_database/meta/main.yml b/test/integration/roles/test_rax_cdb_database/meta/main.yml new file mode 100644 index 00000000000..a3f85b642e3 --- /dev/null +++ b/test/integration/roles/test_rax_cdb_database/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - prepare_rax_tests diff --git a/test/integration/roles/test_rax_cdb_database/tasks/main.yml b/test/integration/roles/test_rax_cdb_database/tasks/main.yml new file mode 100644 index 00000000000..a8f5caa335d --- /dev/null +++ b/test/integration/roles/test_rax_cdb_database/tasks/main.yml @@ -0,0 +1,215 @@ +# ============================================================ +- name: Test rax_cdb_database with no args + rax_cdb_database: + ignore_errors: true + register: rax_cdb_database + +- name: Validate results of rax_cdb_database with no args + assert: + that: + - rax_cdb_database|failed + - rax_cdb_database.msg == 'missing required arguments: name,cdb_id' +# 
============================================================ + + + +# ============================================================ +- name: Test rax_cdb_database with name + rax_cdb_database: + name: fail + ignore_errors: true + register: rax_cdb_database + +- name: Validate results of rax_cdb_database with name + assert: + that: + - rax_cdb_database|failed + - rax_cdb_database.msg == 'missing required arguments: cdb_id' +# ============================================================ + + + +# ============================================================ +- name: Test rax_cdb_database with name and cdb_id + rax_cdb_database: + name: fail + cdb_id: '1234' + ignore_errors: true + register: rax_cdb_database + +- name: Validate results of rax_cdb_database with name and cdb_id + assert: + that: + - rax_cdb_database|failed + - rax_cdb_database.msg == 'No credentials supplied!' +# ============================================================ + + + +# ============================================================ +- name: Test rax_cdb_database with name, cdb_id and creds + rax_cdb_database: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + name: fail + cdb_id: '1234' + ignore_errors: true + register: rax_cdb_database + +- name: Validate results of rax_cdb_database with name, cdb_id and creds + assert: + that: + - rax_cdb_database|failed + - rax_cdb_database.msg.startswith('None is not a valid region') +# ============================================================ + + + +# ============================================================ +- name: Test rax_cdb_database with name, invalid cdb_id, creds and region + rax_cdb_database: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: fail + cdb_id: '1234' + ignore_errors: true + register: rax_cdb_database + +- name: Validate rax_cdb_database name, invalid cdb_id, creds and region + assert: + that: + - rax_cdb_database|failed +# ============================================================ + + + +# ============================================================ +- name: Build Cloud Database for testing + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-rax_cdb_database" + wait: true + register: rax_cdb + +- name: Validate build + assert: + that: + - rax_cdb|success + - rax_cdb|changed + - rax_cdb.cdb.name == '{{ resource_prefix }}-rax_cdb_database' + - rax_cdb.cdb.status == 'ACTIVE' +# ============================================================ + + + +# ============================================================ +- name: Test rax_cdb_database with name, cdb_id, creds and region + rax_cdb_database: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-1" + cdb_id: "{{ rax_cdb.cdb.id }}" + register: rax_cdb_database + +- name: Validate rax_cdb_database name, cdb_id, creds and region + assert: + that: + - rax_cdb_database|success + - rax_cdb_database|changed + - rax_cdb_database.database.name == "{{ resource_prefix }}-1" + +- name: Delete integration 1 + rax_cdb_database: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-1" + cdb_id: "{{ rax_cdb.cdb.id }}" + state: absent + register: rax_cdb_database + +- name: Validate delete integration 1 + assert: + that: + - rax_cdb_database|success + - 
rax_cdb_database|changed + - rax_cdb_database.database.name == "{{ resource_prefix }}-1" +# ============================================================ + + + +# ============================================================ +- name: Test rax_cdb_database idempotency 1 + rax_cdb_database: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-2" + cdb_id: "{{ rax_cdb.cdb.id }}" + register: rax_cdb_database + +- name: Validate rax_cdb_database idempotency 1 + assert: + that: + - rax_cdb_database|success + - rax_cdb_database|changed + - rax_cdb_database.database.name == "{{ resource_prefix }}-2" + +- name: Test rax_cdb_database idempotency 2 + rax_cdb_database: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-2" + cdb_id: "{{ rax_cdb.cdb.id }}" + register: rax_cdb_database + +- name: Validate rax_cdb_database idempotency 2 + assert: + that: + - rax_cdb_database|success + - not rax_cdb_database|changed + - rax_cdb_database.database.name == "{{ resource_prefix }}-2" + +- name: Delete integration 2 + rax_cdb_database: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-2" + cdb_id: "{{ rax_cdb.cdb.id }}" + state: absent + register: rax_cdb_database + +- name: Validate delete integration 2 + assert: + that: + - rax_cdb_database|success + - rax_cdb_database|changed + - rax_cdb_database.database.name == "{{ resource_prefix }}-2" +# ============================================================ + + + +# ============================================================ +- name: Delete Cloud Database + rax_cdb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-rax_cdb_database" + state: absent + wait: true + register: rax_cdb + +- name: Validate Delete + assert: + that: + - rax_cdb|success + - rax_cdb|changed + - rax_cdb.cdb.name == "{{ resource_prefix }}-rax_cdb_database" +# ============================================================ diff --git a/test/integration/roles/test_rax_clb/meta/main.yml b/test/integration/roles/test_rax_clb/meta/main.yml new file mode 100644 index 00000000000..a3f85b642e3 --- /dev/null +++ b/test/integration/roles/test_rax_clb/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - prepare_rax_tests diff --git a/test/integration/roles/test_rax_clb/tasks/main.yml b/test/integration/roles/test_rax_clb/tasks/main.yml new file mode 100644 index 00000000000..8f6a990ceb2 --- /dev/null +++ b/test/integration/roles/test_rax_clb/tasks/main.yml @@ -0,0 +1,857 @@ +# ============================================================ +- name: Test rax_clb with no args + rax_clb: + ignore_errors: true + register: rax_clb + +- name: Validate results of rax_clb with no args + assert: + that: + - rax_clb|failed + - rax_clb.msg == 'missing required arguments: name' +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with name + rax_clb: + name: fail + ignore_errors: true + register: rax_clb + +- name: Validate results of rax_clb with name + assert: + that: + - rax_clb|failed + - rax_clb.msg == 'No credentials supplied!' 
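+# NOTE: with only credentials, region and name supplied, rax_clb falls
+# back to defaults that the assertions below pin down: port 80, protocol
+# HTTP, timeout 30, a PUBLIC virtual IP and the LEAST_CONNECTIONS
+# algorithm. A minimal sketch (example-lb is a placeholder name):
+#
+#   - rax_clb:
+#       username: "{{ rackspace_username }}"
+#       api_key: "{{ rackspace_api_key }}"
+#       region: "{{ rackspace_region }}"
+#       name: example-lb
+#       wait: true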
+# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with name and credentials + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + name: fail + ignore_errors: true + register: rax_clb + +- name: Validate results of rax_clb with name and credentials + assert: + that: + - rax_clb|failed + - rax_clb.msg.startswith('None is not a valid region') +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with creds and region + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + ignore_errors: true + register: rax_clb + +- name: Validate rax_clb creds and region + assert: + that: + - rax_clb|failed + - rax_clb.msg == 'missing required arguments: name' +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with creds, region and name + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-1" + wait: true + register: rax_clb + +- name: Validate rax_clb with creds, region and name + assert: + that: + - rax_clb|success + - rax_clb.balancer.port == 80 + - rax_clb.balancer.protocol == 'HTTP' + - rax_clb.balancer.timeout == 30 + - rax_clb.balancer.virtual_ips.0.type == 'PUBLIC' + - rax_clb.balancer.metadata is not defined + - rax_clb.balancer.status == 'ACTIVE' + - rax_clb.balancer.algorithm == 'LEAST_CONNECTIONS' + +- name: "Delete integration 1" + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-1" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete integration 1" + assert: + that: + - rax_clb|changed + - rax_clb.balancer.name == "{{ resource_prefix }}-1" + +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with creds, region, name and protocol + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-2" + protocol: TCP + wait: true + register: rax_clb + +- name: Validate rax_clb with creds, region, name and protocol + assert: + that: + - rax_clb|success + - rax_clb.balancer.port == 80 + - rax_clb.balancer.protocol == 'TCP' + - rax_clb.balancer.timeout == 30 + - rax_clb.balancer.virtual_ips.0.type == 'PUBLIC' + - rax_clb.balancer.metadata is not defined + - rax_clb.balancer.status == 'ACTIVE' + +- name: "Delete integration 2" + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-2" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete integration 2" + assert: + that: + - rax_clb|changed + - rax_clb.balancer.name == "{{ resource_prefix }}-2" +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with creds, region, name, protocol and port + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ 
resource_prefix }}-3" + protocol: TCP + port: 8080 + wait: true + register: rax_clb + +- name: Validate rax_clb with creds, region, name, protocol and port + assert: + that: + - rax_clb|success + - rax_clb.balancer.port == 8080 + - rax_clb.balancer.protocol == 'TCP' + - rax_clb.balancer.timeout == 30 + - rax_clb.balancer.virtual_ips.0.type == 'PUBLIC' + - rax_clb.balancer.metadata is not defined + - rax_clb.balancer.status == 'ACTIVE' + +- name: "Delete integration 3" + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-3" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete integration 3" + assert: + that: + - rax_clb|changed + - rax_clb.balancer.name == "{{ resource_prefix }}-3" +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with creds, region, name, protocol, port and type + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-4" + protocol: TCP + port: 8080 + type: SERVICENET + wait: true + register: rax_clb + +- name: Validate rax_clb with creds, region, name, protocol and type + assert: + that: + - rax_clb|success + - rax_clb.balancer.port == 8080 + - rax_clb.balancer.protocol == 'TCP' + - rax_clb.balancer.timeout == 30 + - rax_clb.balancer.virtual_ips.0.type == 'SERVICENET' + - rax_clb.balancer.metadata is not defined + - rax_clb.balancer.status == 'ACTIVE' + +- name: "Delete integration 4" + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-4" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete integration 4" + assert: + that: + - rax_clb|changed + - rax_clb.balancer.name == "{{ resource_prefix }}-4" +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with invalid timeout + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-fail" + protocol: TCP + port: 8080 + type: SERVICENET + timeout: 1 + wait: true + ignore_errors: true + register: rax_clb + +- name: Validate rax_clb with invalid timeout + assert: + that: + - rax_clb|failed + - rax_clb.msg == '"timeout" must be greater than or equal to 30' +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with creds, region, name, protocol, port, type and timeout + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-5" + protocol: TCP + port: 8080 + type: SERVICENET + timeout: 60 + wait: true + register: rax_clb + +- name: Validate rax_clb with creds, region, name, protocol, type and timeout + assert: + that: + - rax_clb|success + - rax_clb.balancer.port == 8080 + - rax_clb.balancer.protocol == 'TCP' + - rax_clb.balancer.timeout == 60 + - rax_clb.balancer.virtual_ips.0.type == 'SERVICENET' + - rax_clb.balancer.metadata is not defined + - rax_clb.balancer.status == 'ACTIVE' + +- name: "Delete integration 5" + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + 
region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-5" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete integration 5" + assert: + that: + - rax_clb|changed + - rax_clb.balancer.name == "{{ resource_prefix }}-5" +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with creds, region, name, protocol, port, type, timeout and algorithm + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-6" + protocol: TCP + port: 8080 + type: SERVICENET + timeout: 60 + algorithm: RANDOM + wait: true + register: rax_clb + +- name: Validate rax_clb with creds, region, name, protocol, type, timeout and algorithm + assert: + that: + - rax_clb|success + - rax_clb.balancer.port == 8080 + - rax_clb.balancer.protocol == 'TCP' + - rax_clb.balancer.timeout == 60 + - rax_clb.balancer.virtual_ips.0.type == 'SERVICENET' + - rax_clb.balancer.metadata is not defined + - rax_clb.balancer.status == 'ACTIVE' + - rax_clb.balancer.algorithm == 'RANDOM' + +- name: "Delete integration 6" + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-6" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete integration 6" + assert: + that: + - rax_clb|changed + - rax_clb.balancer.name == "{{ resource_prefix }}-6" +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with invalid type + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-fail" + type: BAD + timeout: 1 + wait: true + ignore_errors: true + register: rax_clb + +- name: Validate rax_clb with invalid timeout + assert: + that: + - rax_clb|failed + - "rax_clb.msg == 'value of type must be one of: PUBLIC,SERVICENET, got: BAD'" +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with invalid protocol + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-fail" + protocol: BAD + timeout: 1 + wait: true + ignore_errors: true + register: rax_clb + +- name: Validate rax_clb with invalid timeout + assert: + that: + - rax_clb|failed + - "rax_clb.msg == 'value of protocol must be one of: DNS_TCP,DNS_UDP,FTP,HTTP,HTTPS,IMAPS,IMAPv4,LDAP,LDAPS,MYSQL,POP3,POP3S,SMTP,TCP,TCP_CLIENT_FIRST,UDP,UDP_STREAM,SFTP, got: BAD'" +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with invalid algorithm + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-fail" + algorithm: BAD + timeout: 1 + wait: true + ignore_errors: true + register: rax_clb + +- name: Validate rax_clb with invalid timeout + assert: + that: + - rax_clb|failed + - "rax_clb.msg == 'value of algorithm must be one of: RANDOM,LEAST_CONNECTIONS,ROUND_ROBIN,WEIGHTED_LEAST_CONNECTIONS,WEIGHTED_ROUND_ROBIN, got: BAD'" +# ============================================================ + + + +# 
============================================================ +- name: Test rax_clb with creds, region, name, protocol, port, type, timeout, algorithm and metadata + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-7" + protocol: TCP + port: 8080 + type: SERVICENET + timeout: 60 + algorithm: RANDOM + meta: + foo: bar + wait: true + register: rax_clb + +- name: Validate rax_clb with creds, region, name, protocol, type, timeout, algorithm and metadata + assert: + that: + - rax_clb|success + - rax_clb.balancer.port == 8080 + - rax_clb.balancer.protocol == 'TCP' + - rax_clb.balancer.timeout == 60 + - rax_clb.balancer.virtual_ips.0.type == 'SERVICENET' + - rax_clb.balancer.status == 'ACTIVE' + - rax_clb.balancer.algorithm == 'RANDOM' + - rax_clb.balancer.metadata.0.key == 'foo' + - rax_clb.balancer.metadata.0.value == 'bar' + +- name: "Delete integration 7" + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-7" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete integration 7" + assert: + that: + - rax_clb|changed + - rax_clb.balancer.name == "{{ resource_prefix }}-7" +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with shared VIP HTTP + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-8-HTTP" + wait: true + register: rax_clb_http + +- name: Validate rax_clb with shared VIP HTTP + assert: + that: + - rax_clb_http|success + - rax_clb_http.balancer.protocol == 'HTTP' + - rax_clb_http.balancer.virtual_ips.0.type == 'PUBLIC' + - rax_clb_http.balancer.status == 'ACTIVE' + +- name: Test rax_clb with shared VIP HTTPS + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-8-HTTPS" + protocol: HTTPS + port: 443 + wait: true + vip_id: "{{ (rax_clb_http.balancer.virtual_ips|first).id }}" + register: rax_clb_https + +- name: Validate rax_clb with shared VIP HTTPS + assert: + that: + - rax_clb_https|success + - rax_clb_https.balancer.protocol == 'HTTPS' + - rax_clb_https.balancer.status == 'ACTIVE' + - rax_clb_http.balancer.virtual_ips == rax_clb_https.balancer.virtual_ips + +- name: "Delete integration 8 HTTP" + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-8-HTTP" + state: absent + wait: true + register: rax_clb_http + +- name: "Delete integration 8 HTTPS" + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-8-HTTPS" + state: absent + wait: true + register: rax_clb_https + +- name: "Validate delete integration 8" + assert: + that: + - rax_clb_http|changed + - rax_clb_https|changed +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with updated protocol 1 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-9" + wait: true + register: rax_clb_p1 + +- name: Validate rax_clb with 
updated protocol 1 + assert: + that: + - rax_clb_p1|success + - rax_clb_p1.balancer.protocol == 'HTTP' + - rax_clb_p1.balancer.virtual_ips.0.type == 'PUBLIC' + - rax_clb_p1.balancer.status == 'ACTIVE' + +- name: Test rax_clb with updated protocol 2 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-9" + protocol: TCP + wait: true + register: rax_clb_p2 + +- name: Validate rax_clb with updated protocol 2 + assert: + that: + - rax_clb_p1.balancer.id == rax_clb_p2.balancer.id + - rax_clb_p2|success + - rax_clb_p2|changed + - rax_clb_p2.balancer.protocol == 'TCP' + - rax_clb_p2.balancer.status == 'ACTIVE' + +- name: Delete integration 9 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-9" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete integration 9" + assert: + that: + - rax_clb|changed +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with updated algorithm 1 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-10" + wait: true + register: rax_clb_a1 + +- name: Validate rax_clb with updated algorithm 1 + assert: + that: + - rax_clb_a1|success + - rax_clb_a1.balancer.algorithm == 'LEAST_CONNECTIONS' + - rax_clb_a1.balancer.status == 'ACTIVE' + +- name: Test rax_clb with updated algorithm 2 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-10" + algorithm: RANDOM + wait: true + register: rax_clb_a2 + +- name: Validate rax_clb with updated algorithm 2 + assert: + that: + - rax_clb_a1.balancer.id == rax_clb_a2.balancer.id + - rax_clb_a2|success + - rax_clb_a2|changed + - rax_clb_a2.balancer.algorithm == 'RANDOM' + - rax_clb_a2.balancer.status == 'ACTIVE' + +- name: Delete integration 10 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-10" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete integration 10" + assert: + that: + - rax_clb|changed + - rax_clb_a1.balancer.id == rax_clb.balancer.id +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with updated port 1 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-11" + wait: true + register: rax_clb_1 + +- name: Validate rax_clb with updated port 1 + assert: + that: + - rax_clb_1|success + - rax_clb_1.balancer.port == 80 + - rax_clb_1.balancer.status == 'ACTIVE' + +- name: Test rax_clb with updated port 2 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-11" + port: 8080 + wait: true + register: rax_clb_2 + +- name: Validate rax_clb with updated port 2 + assert: + that: + - rax_clb_1.balancer.id == rax_clb_2.balancer.id + - rax_clb_2|success + - rax_clb_2|changed + - rax_clb_2.balancer.port == 8080 + - rax_clb_2.balancer.status == 'ACTIVE' + +- name: Delete integration 11 + rax_clb: + 
username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-11" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete integration 11" + assert: + that: + - rax_clb|changed + - rax_clb_1.balancer.id == rax_clb.balancer.id +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with updated timeout 1 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-12" + wait: true + register: rax_clb_1 + +- name: Validate rax_clb with updated timeout 1 + assert: + that: + - rax_clb_1|success + - rax_clb_1.balancer.timeout == 30 + - rax_clb_1.balancer.status == 'ACTIVE' + +- name: Test rax_clb with updated timeout 2 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-12" + timeout: 60 + wait: true + register: rax_clb_2 + +- name: Validate rax_clb with updated timeout 2 + assert: + that: + - rax_clb_1.balancer.id == rax_clb_2.balancer.id + - rax_clb_2|success + - rax_clb_2|changed + - rax_clb_2.balancer.timeout == 60 + - rax_clb_2.balancer.status == 'ACTIVE' + +- name: Delete integration 12 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-12" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete integration 12" + assert: + that: + - rax_clb|changed + - rax_clb_1.balancer.id == rax_clb.balancer.id +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with invalid updated type 1 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-13" + wait: true + register: rax_clb_1 + +- name: Validate rax_clb with invalid updated type 1 + assert: + that: + - rax_clb_1|success + - rax_clb_1.balancer.status == 'ACTIVE' + +- name: Test rax_clb with invalid updated type 2 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-13" + type: SERVICENET + wait: true + register: rax_clb_2 + ignore_errors: true + +- name: Validate rax_clb with updated timeout 2 + assert: + that: + - rax_clb_2|failed + - rax_clb_2.msg == 'Load balancer Virtual IP type cannot be changed' + +- name: Delete integration 13 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-13" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete integration 13" + assert: + that: + - rax_clb|changed + - rax_clb_1.balancer.id == rax_clb.balancer.id +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb with updated meta 1 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-14" + wait: true + register: rax_clb_1 + +- name: Validate rax_clb with updated meta 1 + assert: + that: + - rax_clb_1|success + - rax_clb_1.balancer.status == 
'ACTIVE' + - rax_clb_1.balancer.metadata is not defined + +- name: Test rax_clb with updated meta 2 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-14" + meta: + foo: bar + wait: true + register: rax_clb_2 + +- name: Validate rax_clb with updated meta 2 + assert: + that: + - rax_clb_1.balancer.id == rax_clb_2.balancer.id + - rax_clb_2|success + - rax_clb_2|changed + - rax_clb_2.balancer.metadata.0.key == 'foo' + - rax_clb_2.balancer.metadata.0.value == 'bar' + - rax_clb_2.balancer.status == 'ACTIVE' + +- name: Delete integration 14 + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-14" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete integration 14" + assert: + that: + - rax_clb|changed + - rax_clb_1.balancer.id == rax_clb.balancer.id +# ============================================================ diff --git a/test/integration/roles/test_rax_clb_nodes/meta/main.yml b/test/integration/roles/test_rax_clb_nodes/meta/main.yml new file mode 100644 index 00000000000..a3f85b642e3 --- /dev/null +++ b/test/integration/roles/test_rax_clb_nodes/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - prepare_rax_tests diff --git a/test/integration/roles/test_rax_clb_nodes/tasks/main.yml b/test/integration/roles/test_rax_clb_nodes/tasks/main.yml new file mode 100644 index 00000000000..01bbf9dd9a3 --- /dev/null +++ b/test/integration/roles/test_rax_clb_nodes/tasks/main.yml @@ -0,0 +1,232 @@ +# ============================================================ +- name: Test rax_clb_nodes with no args + rax_clb_nodes: + ignore_errors: true + register: rax_clb_nodes + +- name: Validate results of rax_clb_nodes with no args + assert: + that: + - rax_clb_nodes|failed + - rax_clb_nodes.msg == 'missing required arguments: load_balancer_id' +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb_nodes with load_balancer_id + rax_clb_nodes: + load_balancer_id: 1234 + ignore_errors: true + register: rax_clb_nodes + +- name: Validate results of rax_clb_nodes with load_balancer_id + assert: + that: + - rax_clb_nodes|failed + - rax_clb_nodes.msg == 'No credentials supplied!' 
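+# Aside: a minimal sketch of the failure-testing pattern used throughout this file -- run the module with ignore_errors so the play continues, register the result, then assert on the failure (task names here are illustrative, not part of any role): +# +# - name: expect a failure +# rax_clb_nodes: +# load_balancer_id: 1234 +# ignore_errors: true +# register: result +# +# - name: check the failure +# assert: +# that: +# - result|failed 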
+# ============================================================ + + + +# ============================================================ +- name: Test rax_clb_nodes with credentials and load_balancer_id + rax_clb_nodes: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + load_balancer_id: 1234 + ignore_errors: true + register: rax_clb_nodes + +- name: Validate results of rax_clb_nodes with credentials and load_balancer_id + assert: + that: + - rax_clb_nodes|failed + - rax_clb_nodes.msg.startswith('None is not a valid region') +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb_nodes with creds, region and load_balancer_id + rax_clb_nodes: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + load_balancer_id: 1234 + ignore_errors: true + register: rax_clb_nodes + +- name: Validate rax_clb_nodes creds, region and load_balancer_id + assert: + that: + - rax_clb_nodes|failed + - rax_clb_nodes.msg == 'Load balancer not found' +# ============================================================ + + + +# ============================================================ +- name: Build a CLB to test rax_clb_nodes with + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-clb" + wait: true + register: rax_clb + +- name: Validate rax_clb creation + assert: + that: + - rax_clb|success + +- name: Set variable for CLB ID + set_fact: + rax_clb_id: "{{ rax_clb.balancer.id }}" +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb_nodes with creds, region and valid load_balancer_id + rax_clb_nodes: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + load_balancer_id: "{{ rax_clb_id }}" + ignore_errors: true + register: rax_clb_nodes + +- name: Validate rax_clb_nodes creds, region and valid load_balancer_id + assert: + that: + - rax_clb_nodes|failed + - rax_clb_nodes.msg == 'You must include an address and a port when creating a node.' +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb_nodes with creds, region, load_balancer_id and address + rax_clb_nodes: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + load_balancer_id: "{{ rax_clb_id }}" + address: '10.10.10.10' + ignore_errors: true + register: rax_clb_nodes + +- name: Validate rax_clb_nodes creds, region, load_balancer_id and address + assert: + that: + - rax_clb_nodes|failed + - rax_clb_nodes.msg == 'You must include an address and a port when creating a node.' 
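+# Aside: a node needs both an address and a port at creation time; a minimal valid call would look roughly like the passing test further below (values illustrative): +# +# - rax_clb_nodes: +# load_balancer_id: "{{ rax_clb_id }}" +# address: '172.16.0.1' +# port: 80 +# wait: true 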
+# ============================================================ + + + +# ============================================================ +- name: Test rax_clb_nodes with creds, region, load_balancer_id, invalid address and port + rax_clb_nodes: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + load_balancer_id: "{{ rax_clb_id }}" + address: '10.10.10.10' + port: 80 + ignore_errors: true + register: rax_clb_nodes + +- name: Validate rax_clb_nodes creds, region, load_balancer_id, invalid address and port + assert: + that: + - rax_clb_nodes|failed + - rax_clb_nodes.msg == "Invalid node address. The address '10.10.10.10' is currently not accepted for this request." +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb_nodes with creds, region, load_balancer_id, address and port + rax_clb_nodes: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + load_balancer_id: "{{ rax_clb_id }}" + address: '172.16.0.1' + port: 80 + wait: true + register: rax_clb_nodes + +- name: Validate rax_clb_nodes creds, region, load_balancer_id, address and port + assert: + that: + - rax_clb_nodes|success + - rax_clb_nodes.node.address == '172.16.0.1' + - rax_clb_nodes.node.condition == 'ENABLED' + - rax_clb_nodes.node.port == 80 + - rax_clb_nodes.node.status == 'ONLINE' + - rax_clb_nodes.node.type == 'PRIMARY' + - rax_clb_nodes.node.weight == 1 + +- name: Delete integration 1 + rax_clb_nodes: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + load_balancer_id: "{{ rax_clb_id }}" + node_id: "{{ rax_clb_nodes.node.id }}" + state: absent + wait: true + register: rax_clb_nodes + +- name: Validate delete integration 1 + assert: + that: + - rax_clb_nodes|success +# ============================================================ + + + +# ============================================================ +- name: Test rax_clb_nodes with creds, region, load_balancer_id, address, port and type + rax_clb_nodes: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + load_balancer_id: "{{ rax_clb_id }}" + address: '172.16.0.1' + port: 80 + type: secondary + wait: true + ignore_errors: true + register: rax_clb_nodes + +- name: Validate rax_clb_nodes creds, region, load_balancer_id, address, port and type + assert: + that: + - rax_clb_nodes|failed + - rax_clb_nodes.msg == 'you must enable health monitoring to use secondary nodes' +# ============================================================ + + + +# ============================================================ +- name: Delete CLB + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ rax_clb.balancer.name }}" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete CLB" + assert: + that: + - rax_clb|changed + - rax_clb.balancer.id == rax_clb_id|int +# ============================================================ diff --git a/test/integration/roles/test_rax_facts/meta/main.yml b/test/integration/roles/test_rax_facts/meta/main.yml new file mode 100644 index 00000000000..a3f85b642e3 --- /dev/null +++ b/test/integration/roles/test_rax_facts/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - prepare_rax_tests diff --git 
a/test/integration/roles/test_rax_facts/tasks/main.yml b/test/integration/roles/test_rax_facts/tasks/main.yml new file mode 100644 index 00000000000..374fd8c7c03 --- /dev/null +++ b/test/integration/roles/test_rax_facts/tasks/main.yml @@ -0,0 +1,279 @@ +# ============================================================ +- name: Test rax_facts with no args + rax_facts: + ignore_errors: true + register: rax_facts + +- name: Validate results of rax_facts with no args + assert: + that: + - rax_facts|failed + - rax_facts.msg == 'one of the following is required: address,id,name' +# ============================================================ + + + +# ============================================================ +- name: Test rax_facts with credentials and address + rax_facts: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + address: '1.2.3.4' + ignore_errors: true + register: rax_facts + +- name: Validate results of rax_facts with only creds + assert: + that: + - rax_facts|failed + - rax_facts.msg.startswith('None is not a valid region') +# ============================================================ + + + +# ============================================================ +- name: Test rax_facts with creds, region and address + rax_facts: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + address: '1.2.3.4' + ignore_errors: true + register: rax_facts + +- name: Validate rax_facts creds, region and address + assert: + that: + - rax_facts|success + - rax_facts.ansible_facts == {} + - not rax_facts|changed +# ============================================================ + + + +# ============================================================ +- name: Test rax_facts with creds, region and id + rax_facts: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + id: '1234' + ignore_errors: true + register: rax_facts + +- name: Validate rax_facts creds, region and id + assert: + that: + - rax_facts|success + - rax_facts.ansible_facts == {} + - not rax_facts|changed +# ============================================================ + + + +# ============================================================ +- name: Test rax_facts with creds, region and name + rax_facts: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: 'bad' + ignore_errors: true + register: rax_facts + +- name: Validate rax_facts creds, region and name + assert: + that: + - rax_facts|success + - rax_facts.ansible_facts == {} + - not rax_facts|changed +# ============================================================ + + + +# ============================================================ +- name: Test rax_facts with creds, region, address, id and name + rax_facts: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + address: '1.2.3.4' + id: '1234' + name: 'bad' + ignore_errors: true + register: rax_facts + +- name: Validate rax_facts creds, region, address, id and name + assert: + that: + - rax_facts|failed + - "rax_facts.msg == 'parameters are mutually exclusive: [\\'address\\', \\'id\\', \\'name\\']'" +# ============================================================ + + + +# ============================================================ +- name: Build CloudServer for rax_facts tests + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ 
rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-rax_facts" + wait: true + register: rax + +- name: Validate build + assert: + that: + - rax|success + - rax|changed + - rax.action == 'create' + - rax.instances|length == 1 + - rax.instances[0].name == "{{ resource_prefix }}-rax_facts" +# ============================================================ + + + +# ============================================================ +- name: Test rax_facts with creds, region, and valid public IPv4 address + rax_facts: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + address: "{{ rax.success.0.rax_accessipv4 }}" + register: rax_facts + + +- name: Validate rax_facts creds, region, and valid public IPv4 address + assert: + that: + - rax_facts|success + - rax_facts.ansible_facts.rax_flavor == rax.success.0.rax_flavor + - rax_facts.ansible_facts.rax_image == rax.success.0.rax_image + - rax_facts.ansible_facts.rax_addresses == rax.success.0.rax_addresses + - rax_facts.ansible_facts.rax_id == rax.success.0.rax_id + - rax_facts.ansible_facts.rax_name == rax.success.0.rax_name + - rax_facts.ansible_facts.rax_hostid == rax.success.0.rax_hostid +# ============================================================ + + + +# ============================================================ +- name: Test rax_facts with creds, region, and valid public IPv6 address + rax_facts: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + address: "{{ rax.success.0.rax_accessipv6 }}" + register: rax_facts + + +- name: Validate rax_facts creds, region, and valid public IPv6 address + assert: + that: + - rax_facts|success + - rax_facts.ansible_facts.rax_flavor == rax.success.0.rax_flavor + - rax_facts.ansible_facts.rax_image == rax.success.0.rax_image + - rax_facts.ansible_facts.rax_addresses == rax.success.0.rax_addresses + - rax_facts.ansible_facts.rax_id == rax.success.0.rax_id + - rax_facts.ansible_facts.rax_name == rax.success.0.rax_name + - rax_facts.ansible_facts.rax_hostid == rax.success.0.rax_hostid +# ============================================================ + + + +# ============================================================ +- name: Test rax_facts with creds, region, and valid private IPv4 address + rax_facts: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + address: "{{ rax.success.0.rax_networks.private|first }}" + register: rax_facts + + +- name: Validate rax_facts creds, region, and valid private IPv4 address + assert: + that: + - rax_facts|success + - rax_facts.ansible_facts.rax_flavor == rax.success.0.rax_flavor + - rax_facts.ansible_facts.rax_image == rax.success.0.rax_image + - rax_facts.ansible_facts.rax_addresses == rax.success.0.rax_addresses + - rax_facts.ansible_facts.rax_id == rax.success.0.rax_id + - rax_facts.ansible_facts.rax_name == rax.success.0.rax_name + - rax_facts.ansible_facts.rax_hostid == rax.success.0.rax_hostid +# ============================================================ + + + +# ============================================================ +- name: Test rax_facts with creds, region, and valid ID + rax_facts: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + id: "{{ rax.success.0.rax_id }}" + register: rax_facts + + +- name: Validate rax_facts creds, region, and valid ID + assert: 
+ that: + - rax_facts|success + - rax_facts.ansible_facts.rax_flavor == rax.success.0.rax_flavor + - rax_facts.ansible_facts.rax_image == rax.success.0.rax_image + - rax_facts.ansible_facts.rax_addresses == rax.success.0.rax_addresses + - rax_facts.ansible_facts.rax_id == rax.success.0.rax_id + - rax_facts.ansible_facts.rax_name == rax.success.0.rax_name + - rax_facts.ansible_facts.rax_hostid == rax.success.0.rax_hostid +# ============================================================ + + + +# ============================================================ +- name: Test rax_facts with creds, region, and valid name + rax_facts: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ rax.success.0.rax_name }}" + register: rax_facts + + +- name: Validate rax_facts creds, region, and valid name + assert: + that: + - rax_facts|success + - rax_facts.ansible_facts.rax_flavor == rax.success.0.rax_flavor + - rax_facts.ansible_facts.rax_image == rax.success.0.rax_image + - rax_facts.ansible_facts.rax_addresses == rax.success.0.rax_addresses + - rax_facts.ansible_facts.rax_id == rax.success.0.rax_id + - rax_facts.ansible_facts.rax_name == rax.success.0.rax_name + - rax_facts.ansible_facts.rax_hostid == rax.success.0.rax_hostid +# ============================================================ + + + +# ============================================================ +- name: "Delete CloudServer" + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-rax_facts" + state: absent + wait: true + register: rax + +- name: "Validate delete" + assert: + that: + - rax|changed + - rax|success + - rax.action == 'delete' + - rax.success[0].name == "{{ resource_prefix }}-rax_facts" +# ============================================================ diff --git a/test/integration/roles/test_rax_identity/meta/main.yml b/test/integration/roles/test_rax_identity/meta/main.yml new file mode 100644 index 00000000000..a3f85b642e3 --- /dev/null +++ b/test/integration/roles/test_rax_identity/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - prepare_rax_tests diff --git a/test/integration/roles/test_rax_identity/tasks/main.yml b/test/integration/roles/test_rax_identity/tasks/main.yml new file mode 100644 index 00000000000..d5b12eb3509 --- /dev/null +++ b/test/integration/roles/test_rax_identity/tasks/main.yml @@ -0,0 +1,51 @@ +# ============================================================ +- name: Test rax_identity with no args + rax_identity: + ignore_errors: true + register: rax_identity + +- name: Validate results of rax_identity with no args + assert: + that: + - rax_identity|failed + - rax_identity.msg == 'No credentials supplied!' 
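+# Aside: a successful rax_identity run (exercised below) registers identity facts such as region, username, services and auth_token; a sketch of inspecting one of them afterwards: +# +# - debug: +# var: rax_identity.identity.auth_token 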
+# ============================================================ + + + +# ============================================================ +- name: Test rax_identity with credentials + rax_identity: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + ignore_errors: true + register: rax_identity + +- name: Validate results of rax_identity with credentials + assert: + that: + - rax_identity|failed + - rax_identity.msg.startswith('None is not a valid region') +# ============================================================ + + + +# ============================================================ +- name: Test rax_identity with credentials and region + rax_identity: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + register: rax_identity + +- name: Validate results of rax_identity with credentials and region + assert: + that: + - rax_identity|success + - not rax_identity|changed + - rax_identity.identity.region == "{{ rackspace_region }}" + - rax_identity.identity.username == "{{ rackspace_username }}" + - rax_identity.identity.authenticated + - rax_identity.identity.services + - rax_identity.identity.auth_token +# ============================================================ diff --git a/test/integration/roles/test_rax_keypair/meta/main.yml b/test/integration/roles/test_rax_keypair/meta/main.yml new file mode 100644 index 00000000000..a3f85b642e3 --- /dev/null +++ b/test/integration/roles/test_rax_keypair/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - prepare_rax_tests diff --git a/test/integration/roles/test_rax_keypair/tasks/main.yml b/test/integration/roles/test_rax_keypair/tasks/main.yml new file mode 100644 index 00000000000..f7f10a46783 --- /dev/null +++ b/test/integration/roles/test_rax_keypair/tasks/main.yml @@ -0,0 +1,245 @@ +# ============================================================ +- name: Test rax_keypair with no args + rax_keypair: + ignore_errors: true + register: rax_keypair + +- name: Validate results of rax_keypair with no args + assert: + that: + - rax_keypair|failed + - rax_keypair.msg == 'missing required arguments: name' +# ============================================================ + + + +# ============================================================ +- name: Test rax_keypair with name + rax_keypair: + name: fail + ignore_errors: true + register: rax_keypair + +- name: Validate results of rax_keypair with name + assert: + that: + - rax_keypair|failed + - rax_keypair.msg == 'No credentials supplied!' 
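+# Aside: rackspace_keypair_pub is assumed to be supplied by the prepare_rax_tests dependency; for a local run it could be seeded with something like (hypothetical key path): +# +# - name: seed a test ssh key +# command: ssh-keygen -t rsa -N '' -f {{ output_dir }}/test-key creates={{ output_dir }}/test-key.pub 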
+# ============================================================ + + + +# ============================================================ +- name: Test rax_keypair with name and credentials + rax_keypair: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + name: fail + ignore_errors: true + register: rax_keypair + +- name: Validate results of rax_keypair with name and credentials + assert: + that: + - rax_keypair|failed + - rax_keypair.msg.startswith('None is not a valid region') +# ============================================================ + + + +# ============================================================ +- name: Create public key file for tests + copy: + content: "{{ rackspace_keypair_pub }}" + dest: "{{ output_dir|expanduser }}/{{ resource_prefix }}.pub" + +- name: Set variable for public key path + set_fact: + rackspace_keypair_pub_path: "{{ output_dir|expanduser }}/{{ resource_prefix }}.pub" +# ============================================================ + + + +# ============================================================ +- name: Test rax_keypair with creds, region, name and public_key string + rax_keypair: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-1" + public_key: "{{ rackspace_keypair_pub }}" + register: rax_keypair + +- name: Validate rax_keypair creds, region, name and public_key string + assert: + that: + - rax_keypair|success + - rax_keypair|changed + - rax_keypair.keypair.name == "{{ resource_prefix }}-1" + - rax_keypair.keypair.public_key == "{{ rackspace_keypair_pub }}" + +- name: Delete integration 1 + rax_keypair: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-1" + public_key: "{{ rackspace_keypair_pub }}" + state: absent + register: rax_keypair + +- name: Validate delete integration 1 + assert: + that: + - rax_keypair|success + - rax_keypair|changed + - rax_keypair.keypair.name == "{{ resource_prefix }}-1" +# ============================================================ + + + +# ============================================================ +- name: Test rax_keypair with creds, region, name and public_key path + rax_keypair: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-2" + public_key: "{{ rackspace_keypair_pub_path }}" + register: rax_keypair + +- name: Validate rax_keypair creds, region, name and public_key path + assert: + that: + - rax_keypair|success + - rax_keypair|changed + - rax_keypair.keypair.name == "{{ resource_prefix }}-2" + - rax_keypair.keypair.public_key == "{{ rackspace_keypair_pub }}" + +- name: Delete integration 2 + rax_keypair: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-2" + public_key: "{{ rackspace_keypair_pub }}" + state: absent + register: rax_keypair + +- name: Validate delete integration 2 + assert: + that: + - rax_keypair|success + - rax_keypair|changed + - rax_keypair.keypair.name == "{{ resource_prefix }}-2" +# ============================================================ + + + +# ============================================================ +- name: Test rax_keypair with idempotency 1 + rax_keypair: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ 
resource_prefix }}-3" + public_key: "{{ rackspace_keypair_pub }}" + register: rax_keypair + +- name: Validate rax_keypair with idempotency 1 + assert: + that: + - rax_keypair|success + - rax_keypair|changed + - rax_keypair.keypair.name == "{{ resource_prefix }}-3" + - rax_keypair.keypair.public_key == "{{ rackspace_keypair_pub }}" + +- name: Test rax_keypair with idempotency 2 + rax_keypair: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-3" + public_key: "{{ rackspace_keypair_pub }}" + register: rax_keypair + +- name: Validate rax_keypair with idempotency 1 + assert: + that: + - rax_keypair|success + - not rax_keypair|changed + - rax_keypair.keypair.name == "{{ resource_prefix }}-3" + - rax_keypair.keypair.public_key == "{{ rackspace_keypair_pub }}" + +- name: Delete integration 3 + rax_keypair: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-3" + public_key: "{{ rackspace_keypair_pub }}" + state: absent + register: rax_keypair + +- name: Validate delete integration 3 + assert: + that: + - rax_keypair|success + - rax_keypair|changed + - rax_keypair.keypair.name == "{{ resource_prefix }}-3" +# ============================================================ + + + +# ============================================================ +- name: Test rax_keypair with creds, region and name + rax_keypair: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-4" + register: rax_keypair + +- name: Validate rax_keypair creds, region and name + assert: + that: + - rax_keypair|success + - rax_keypair|changed + - rax_keypair.keypair.name == "{{ resource_prefix }}-4" + - rax_keypair.keypair.private_key is defined + - rax_keypair.keypair.public_key is defined + +- name: Test rax_keypair with creds, region and name idempotency + rax_keypair: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-4" + register: rax_keypair + +- name: Validate rax_keypair creds, region and name + assert: + that: + - rax_keypair|success + - not rax_keypair|changed + - rax_keypair.keypair.name == "{{ resource_prefix }}-4" + - rax_keypair.keypair.private_key is not defined + - rax_keypair.keypair.public_key is defined + +- name: Delete integration 4 + rax_keypair: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-4" + public_key: "{{ rackspace_keypair_pub }}" + state: absent + register: rax_keypair + +- name: Validate delete integration 4 + assert: + that: + - rax_keypair|success + - rax_keypair|changed + - rax_keypair.keypair.name == "{{ resource_prefix }}-4" +# ============================================================ diff --git a/test/integration/roles/test_rax_meta/meta/main.yml b/test/integration/roles/test_rax_meta/meta/main.yml new file mode 100644 index 00000000000..a3f85b642e3 --- /dev/null +++ b/test/integration/roles/test_rax_meta/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - prepare_rax_tests diff --git a/test/integration/roles/test_rax_meta/tasks/main.yml b/test/integration/roles/test_rax_meta/tasks/main.yml new file mode 100644 index 00000000000..b31336fc54a --- /dev/null +++ b/test/integration/roles/test_rax_meta/tasks/main.yml @@ -0,0 +1,334 @@ +# 
============================================================ +- name: Test rax_meta with no args + rax_meta: + ignore_errors: true + register: rax_meta + +- name: Validate results of rax_meta with no args + assert: + that: + - rax_meta|failed + - rax_meta.msg == 'one of the following is required: address,id,name' +# ============================================================ + + + +# ============================================================ +- name: Test rax_meta with credentials and address + rax_meta: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + address: '1.2.3.4' + ignore_errors: true + register: rax_meta + +- name: Validate results of rax_meta with only creds + assert: + that: + - rax_meta|failed + - rax_meta.msg.startswith('None is not a valid region') +# ============================================================ + + +# ============================================================ +- name: Test rax_meta with creds, region and address + rax_meta: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + address: '1.2.3.4' + ignore_errors: true + register: rax_meta + +- name: Validate rax_meta creds, region and address + assert: + that: + - rax_meta|failed + - rax_meta.msg == 'Failed to find a server matching provided search parameters' +# ============================================================ + + + +# ============================================================ +- name: Test rax_meta with creds, region and id + rax_meta: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + id: '1234' + ignore_errors: true + register: rax_meta + +- name: Validate rax_meta creds, region and id + assert: + that: + - rax_meta|failed + - rax_meta.msg == 'Failed to find a server matching provided search parameters' +# ============================================================ + + + +# ============================================================ +- name: Test rax_meta with creds, region and name + rax_meta: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: 'bad' + ignore_errors: true + register: rax_meta + +- name: Validate rax_meta creds, region and name + assert: + that: + - rax_meta|failed + - rax_meta.msg == 'Failed to find a server matching provided search parameters' +# ============================================================ + + + +# ============================================================ +- name: Test rax_meta with creds, region, address, id and name + rax_meta: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + address: '1.2.3.4' + id: '1234' + name: 'bad' + ignore_errors: true + register: rax_meta + +- name: Validate rax_meta creds, region, address, id and name + assert: + that: + - rax_meta|failed + - "rax_meta.msg == 'parameters are mutually exclusive: [\\'address\\', \\'id\\', \\'name\\']'" +# ============================================================ + + +# ============================================================ +- name: Build CloudServer for rax_meta tests + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + image: "{{ rackspace_image_id }}" + flavor: "{{ rackspace_flavor }}" + name: "{{ resource_prefix }}-rax_meta" + meta: + foo: bar + wait: true + register: rax + +- name: Validate build + assert: + that: + - rax|success + - 
rax|changed + - rax.action == 'create' + - rax.instances|length == 1 + - rax.instances[0].name == "{{ resource_prefix }}-rax_meta" +# ============================================================ + + + +# ============================================================ +- name: Test rax_meta with creds, region, and valid public IPv4 address + rax_meta: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + address: "{{ rax.success.0.rax_accessipv4 }}" + register: rax_meta + + +- name: Validate rax_meta creds, region, and valid public IPv4 address + assert: + that: + - rax_meta|success + - rax_meta.meta == {} +# ============================================================ + + +# ============================================================ +- name: Test rax_meta with creds, region, and valid public IPv6 address + rax_meta: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + address: "{{ rax.success.0.rax_accessipv6 }}" + register: rax_meta + + +- name: Validate rax_meta creds, region, and valid public IPv6 address + assert: + that: + - rax_meta|success + - rax_meta.meta == {} +# ============================================================ + + + +# ============================================================ +- name: Test rax_meta with creds, region, and valid private IPv4 address + rax_meta: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + address: "{{ rax.success.0.rax_networks.private|first }}" + register: rax_meta + + +- name: Validate rax_meta creds, region, and valid private IPv4 address + assert: + that: + - rax_meta|success + - rax_meta.meta == {} +# ============================================================ + + + +# ============================================================ +- name: Test rax_meta with creds, region, and valid ID + rax_meta: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + id: "{{ rax.success.0.rax_id }}" + register: rax_meta + + +- name: Validate rax_meta creds, region, and valid ID + assert: + that: + - rax_meta|success + - rax_meta.meta == {} +# ============================================================ + + + +# ============================================================ +- name: Test rax_meta with creds, region, and valid name + rax_meta: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ rax.success.0.rax_name }}" + register: rax_meta + + +- name: Validate rax_meta creds, region, and valid name + assert: + that: + - rax_meta|success + - rax_meta.meta == {} +# ============================================================ + + + +# ============================================================ +- name: Test rax_meta with creds, region, and valid ID set foo=bar + rax_meta: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + id: "{{ rax.success.0.rax_id }}" + meta: + foo: bar + register: rax_meta + + +- name: Validate rax_meta creds, region, and valid ID set foo=bar + assert: + that: + - rax_meta|success + - rax_meta|changed + - "rax_meta.meta == {'foo': 'bar'}" +# ============================================================ + + + +# ============================================================ +- name: Test rax_meta with creds, region, and valid ID set bar=baz + rax_meta: + username: "{{ 
rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + id: "{{ rax.success.0.rax_id }}" + meta: + bar: baz + register: rax_meta + + +- name: Validate rax_meta creds, region, and valid ID set bar=baz + assert: + that: + - rax_meta|success + - "rax_meta.meta == {'bar': 'baz'}" +# ============================================================ + + + +# ============================================================ +- name: Test rax_meta with creds, region, and valid ID set bar=baz (idempotent) + rax_meta: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + id: "{{ rax.success.0.rax_id }}" + meta: + bar: baz + register: rax_meta + + +- name: Validate rax_meta creds, region, and valid ID set bar=baz + assert: + that: + - rax_meta|success + - not rax_meta|changed + - "rax_meta.meta == {'bar': 'baz'}" +# ============================================================ + + + +# ============================================================ +- name: Test rax_meta with creds, region, and valid ID delete meta + rax_meta: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + id: "{{ rax.success.0.rax_id }}" + register: rax_meta + + +- name: Validate rax_meta creds, region, and valid ID delete meta + assert: + that: + - rax_meta|success + - rax_meta|changed + - rax_meta.meta == {} +# ============================================================ + + + +# ============================================================ +- name: "Delete CloudServer" + rax: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + instance_ids: + - "{{ rax.success.0.rax_id }}" + state: absent + wait: true + register: rax + +- name: "Validate delete" + assert: + that: + - rax|changed + - rax|success + - rax.action == 'delete' + - rax.success[0].name == "{{ resource_prefix }}-rax_meta" +# ============================================================ diff --git a/test/integration/roles/test_rax_network/meta/main.yml b/test/integration/roles/test_rax_network/meta/main.yml new file mode 100644 index 00000000000..a3f85b642e3 --- /dev/null +++ b/test/integration/roles/test_rax_network/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - prepare_rax_tests diff --git a/test/integration/roles/test_rax_network/tasks/main.yml b/test/integration/roles/test_rax_network/tasks/main.yml new file mode 100644 index 00000000000..27eda8b273e --- /dev/null +++ b/test/integration/roles/test_rax_network/tasks/main.yml @@ -0,0 +1,158 @@ +# ============================================================ +- name: Test rax_network with no args + rax_network: + ignore_errors: true + register: rax_network + +- name: Validate results of rax_network with no args + assert: + that: + - rax_network|failed + - rax_network.msg == 'missing required arguments: label' +# ============================================================ + + + +# ============================================================ +- name: Test rax_network with label + rax_network: + label: fail + ignore_errors: true + register: rax_network + +- name: Validate results of rax_network with no args + assert: + that: + - rax_network|failed + - rax_network.msg == 'No credentials supplied!' 
+# ============================================================ + + + +# ============================================================ +- name: Test rax_network with creds + rax_network: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + label: fail + ignore_errors: true + register: rax_network + +- name: Validate results of rax_network with creds + assert: + that: + - rax_network|failed + - rax_network.msg.startswith('None is not a valid region') +# ============================================================ + + + +# ============================================================ +- name: Test rax_network with creds and region + rax_network: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + label: fail + ignore_errors: true + register: rax_network + +- name: Validate results of rax_network with creds and region + assert: + that: + - rax_network|failed + - rax_network.msg == 'missing required arguments: cidr' +# ============================================================ + + + +# ============================================================ +- name: Test rax_network with creds, region and cidr + rax_network: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cidr: "172.17.141.0/24" + label: "{{ resource_prefix }}-1" + register: rax_network + +- name: Validate results of rax_network with creds, region and cidr + assert: + that: + - rax_network|success + - rax_network|changed + - rax_network.networks.0.cidr == "172.17.141.0/24" + - rax_network.networks.0.label == "{{ resource_prefix }}-1" + +- name: Delete Integration 1 + rax_network: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + label: "{{ resource_prefix }}-1" + state: absent + register: rax_network + +- name: Validate delete integration 1 + assert: + that: + - rax_network|changed + - rax_network|success + - rax_network.networks.0.label == "{{ resource_prefix }}-1" + - rax_network.networks.0.cidr == "172.17.141.0/24" +# ============================================================ + + + +# ============================================================ +- name: Test rax_network idempotency 1 + rax_network: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cidr: "172.17.142.0/24" + label: "{{ resource_prefix }}-2" + register: rax_network + +- name: Validate rax_network idempotency 1 + assert: + that: + - rax_network|success + - rax_network|changed + - rax_network.networks.0.cidr == "172.17.142.0/24" + - rax_network.networks.0.label == "{{ resource_prefix }}-2" + +- name: Test rax_network idempotency 2 + rax_network: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cidr: "172.17.142.0/24" + label: "{{ resource_prefix }}-2" + register: rax_network + +- name: Validate rax_network idempotency 2 + assert: + that: + - rax_network|success + - not rax_network|changed + - rax_network.networks.0.cidr == "172.17.142.0/24" + - rax_network.networks.0.label == "{{ resource_prefix }}-2" + +- name: Delete Integration 2 + rax_network: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + label: "{{ resource_prefix }}-2" + state: absent + register: rax_network + +- name: Validate delete integration 2 + assert: + that: + - rax_network|changed + - rax_network|success + 
- rax_network.networks.0.label == "{{ resource_prefix }}-2" + - rax_network.networks.0.cidr == "172.17.142.0/24" +# ============================================================ diff --git a/test/integration/roles/test_service/files/ansible-broken.upstart b/test/integration/roles/test_service/files/ansible-broken.upstart new file mode 100644 index 00000000000..4e9c6694a14 --- /dev/null +++ b/test/integration/roles/test_service/files/ansible-broken.upstart @@ -0,0 +1,10 @@ +description "ansible test daemon" + +start on runlevel [345] +stop on runlevel [!345] + +expect daemon + +exec ansible_test_service + +manual diff --git a/test/integration/roles/test_service/files/ansible.systemd b/test/integration/roles/test_service/files/ansible.systemd index 4517433fd2c..77c4911c230 100644 --- a/test/integration/roles/test_service/files/ansible.systemd +++ b/test/integration/roles/test_service/files/ansible.systemd @@ -2,6 +2,6 @@ Description=Ansible Test Service [Service] -ExecStart=/usr/sbin/ansible_test_service +ExecStart=/usr/sbin/ansible_test_service "Test\nthat newlines in scripts\nwork" ExecReload=/bin/true Type=forking diff --git a/test/integration/roles/test_service/tasks/main.yml b/test/integration/roles/test_service/tasks/main.yml index 749d164724e..6f941eeb5c1 100644 --- a/test/integration/roles/test_service/tasks/main.yml +++ b/test/integration/roles/test_service/tasks/main.yml @@ -6,7 +6,7 @@ assert: that: - "install_result.dest == '/usr/sbin/ansible_test_service'" - - "install_result.md5sum == '9ad49eaf390b30b1206b793ec71200ed'" + - "install_result.checksum == 'baaa79448a976922c080f1971321d203c6df0961'" - "install_result.state == 'file'" - "install_result.mode == '0755'" @@ -90,6 +90,16 @@ that: - "disable_result.enabled == false" +- name: try to enable a broken service + service: name=ansible_broken_test enabled=yes + register: broken_enable_result + ignore_errors: True + +- name: assert that the broken test failed + assert: + that: + - "broken_enable_result.failed == True" + - name: remove the test daemon script + file: path=/usr/sbin/ansible_test_service state=absent + register: remove_result diff --git a/test/integration/roles/test_service/tasks/systemd_cleanup.yml b/test/integration/roles/test_service/tasks/systemd_cleanup.yml index 5a3abf46f0f..10a60b216c6 100644 --- a/test/integration/roles/test_service/tasks/systemd_cleanup.yml +++ b/test/integration/roles/test_service/tasks/systemd_cleanup.yml @@ -2,11 +2,18 @@ file: path=/usr/lib/systemd/system/ansible_test.service state=absent register: remove_systemd_result +- name: remove the broken systemd unit file + file: path=/usr/lib/systemd/system/ansible_test_broken.service state=absent + register: remove_systemd_broken_result + +- debug: var=remove_systemd_broken_result - name: assert that the systemd unit file was removed assert: that: - "remove_systemd_result.path == '/usr/lib/systemd/system/ansible_test.service'" - "remove_systemd_result.state == 'absent'" + - "remove_systemd_broken_result.path == '/usr/lib/systemd/system/ansible_test_broken.service'" + - "remove_systemd_broken_result.state == 'absent'" - name: make sure systemd is reloaded shell: systemctl daemon-reload diff --git a/test/integration/roles/test_service/tasks/systemd_setup.yml b/test/integration/roles/test_service/tasks/systemd_setup.yml index ca8d4078e64..4a3a81a4a60 100644 --- a/test/integration/roles/test_service/tasks/systemd_setup.yml +++ b/test/integration/roles/test_service/tasks/systemd_setup.yml @@ -2,11 +2,17 @@ copy: src=ansible.systemd 
dest=/usr/lib/systemd/system/ansible_test.service register: install_systemd_result +- name: install a broken systemd unit file + file: src=ansible_test.service path=/usr/lib/systemd/system/ansible_test_broken.service state=link + register: install_broken_systemd_result + - name: assert that the systemd unit file was installed assert: that: - "install_systemd_result.dest == '/usr/lib/systemd/system/ansible_test.service'" - "install_systemd_result.state == 'file'" - "install_systemd_result.mode == '0644'" - - "install_systemd_result.md5sum == 'f634df77d9160ab05bad4ed49d82a0d0'" + - "install_systemd_result.checksum == 'ca4b413fdf3cb2002f51893b9e42d2e449ec5afb'" + - "install_broken_systemd_result.dest == '/usr/lib/systemd/system/ansible_test_broken.service'" + - "install_broken_systemd_result.state == 'link'" diff --git a/test/integration/roles/test_service/tasks/sysv_setup.yml b/test/integration/roles/test_service/tasks/sysv_setup.yml index 83a1d6a8c48..796a2fe9a71 100644 --- a/test/integration/roles/test_service/tasks/sysv_setup.yml +++ b/test/integration/roles/test_service/tasks/sysv_setup.yml @@ -8,5 +8,5 @@ - "install_sysv_result.dest == '/etc/init.d/ansible_test'" - "install_sysv_result.state == 'file'" - "install_sysv_result.mode == '0755'" - - "install_sysv_result.md5sum == 'ebf6a9064ca8628187f3a6caf8e2a279'" + - "install_sysv_result.checksum == '174fa255735064b420600e4c8637ea0eff28d0c1'" diff --git a/test/integration/roles/test_service/tasks/upstart_cleanup.yml b/test/integration/roles/test_service/tasks/upstart_cleanup.yml index c99446bf652..a589d5a986e 100644 --- a/test/integration/roles/test_service/tasks/upstart_cleanup.yml +++ b/test/integration/roles/test_service/tasks/upstart_cleanup.yml @@ -2,9 +2,14 @@ file: path=/etc/init/ansible_test.conf state=absent register: remove_upstart_result +- name: remove the broken upstart init file + file: path=/etc/init/ansible_broken_test.conf state=absent + register: remove_upstart_broken_result + - name: assert that the upstart init file was removed assert: that: - "remove_upstart_result.path == '/etc/init/ansible_test.conf'" - "remove_upstart_result.state == 'absent'" - + - "remove_upstart_broken_result.path == '/etc/init/ansible_broken_test.conf'" + - "remove_upstart_broken_result.state == 'absent'" diff --git a/test/integration/roles/test_service/tasks/upstart_setup.yml b/test/integration/roles/test_service/tasks/upstart_setup.yml index e889ef2789d..e9607bb030e 100644 --- a/test/integration/roles/test_service/tasks/upstart_setup.yml +++ b/test/integration/roles/test_service/tasks/upstart_setup.yml @@ -2,11 +2,18 @@ copy: src=ansible.upstart dest=/etc/init/ansible_test.conf mode=0644 register: install_upstart_result +- name: install an upstart init file that will fail (manual in .conf) + copy: src=ansible-broken.upstart dest=/etc/init/ansible_broken_test.conf mode=0644 + register: install_upstart_broken_result + - name: assert that the upstart init file was installed assert: that: - "install_upstart_result.dest == '/etc/init/ansible_test.conf'" - "install_upstart_result.state == 'file'" - "install_upstart_result.mode == '0644'" - - "install_upstart_result.md5sum == 'ab3900ea4de8423add764c12aeb90c01'" - + - "install_upstart_result.checksum == '5c314837b6c4dd6c68d1809653a2974e9078e02a'" + - "install_upstart_broken_result.dest == '/etc/init/ansible_broken_test.conf'" + - "install_upstart_broken_result.state == 'file'" + - "install_upstart_broken_result.mode == '0644'" + - "install_upstart_broken_result.checksum == 
'e66497894f2b2bf71e1380a196cc26089cc24a10'" diff --git a/test/integration/roles/test_stat/tasks/main.yml b/test/integration/roles/test_stat/tasks/main.yml index f27721a6979..0019fda2ae0 100644 --- a/test/integration/roles/test_stat/tasks/main.yml +++ b/test/integration/roles/test_stat/tasks/main.yml @@ -45,7 +45,8 @@ - "'issock' in stat_result.stat" - "'isuid' in stat_result.stat" - "'md5' in stat_result.stat" - - "stat_result.stat.md5 == '5eb63bbbe01eeed093cb22bb8f5acdc3'" + - "'checksum' in stat_result.stat" + - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'" - "'mode' in stat_result.stat" # why is this 420? - "'mtime' in stat_result.stat" - "'nlink' in stat_result.stat" @@ -61,4 +62,8 @@ - "'xgrp' in stat_result.stat" - "'xoth' in stat_result.stat" - "'xusr' in stat_result.stat" - + +- assert: + that: + - "stat_result.stat.md5 == '5eb63bbbe01eeed093cb22bb8f5acdc3'" + when: ansible_fips != True diff --git a/test/integration/roles/test_subversion/tasks/main.yml b/test/integration/roles/test_subversion/tasks/main.yml index c3d741a2903..796f2727a59 100644 --- a/test/integration/roles/test_subversion/tasks/main.yml +++ b/test/integration/roles/test_subversion/tasks/main.yml @@ -44,7 +44,7 @@ # "Revision: 9", # "URL: https://github.com/jimi-c/test_role" # ], -# "befbore": null, +# "before": null, # "changed": true, # "invocation": { # "module_args": "repo=https://github.com/jimi-c/test_role dest=~/ansible_testing/svn", @@ -59,12 +59,17 @@ - "'after' in subverted" - "subverted.after.1 == 'URL: https://github.com/jimi-c/test_role'" - "not subverted.before" - - "subverted.changed" + - "subverted.changed" - name: repeated checkout subversion: repo={{ repo }} dest={{ checkout_dir }} register: subverted2 +- name: verify on a reclone things are marked unchanged + assert: + that: + - "not subverted2.changed" + - name: check for tags stat: path={{ checkout_dir }}/tags register: tags @@ -91,15 +96,28 @@ - debug: var=subverted3 - name: checkout with export - subversion: repo={{ repo }} dest={{ checkout_dir }} export=True + subversion: repo={{ repo }} dest={{ output_dir }}/svn-export export=True register: subverted4 -- name: verify on a reclone things are marked unchanged +- name: check for tags + stat: path={{ output_dir }}/svn-export/tags + register: export_tags + +- name: check for trunk + stat: path={{ output_dir }}/svn-export/trunk + register: export_trunk + +- name: check for branches + stat: path={{ output_dir }}/svn-export/branches + register: export_branches + +- name: assert presence of tags/trunk/branches in export assert: that: - - "not subverted4.changed" + - "export_tags.stat.isdir" + - "export_trunk.stat.isdir" + - "export_branches.stat.isdir" + - "subverted4.changed" # TBA: test for additional options or URL variants welcome - - diff --git a/test/integration/roles/test_sudo/files/baz.txt b/test/integration/roles/test_sudo/files/baz.txt new file mode 100644 index 00000000000..a69dd57604c --- /dev/null +++ b/test/integration/roles/test_sudo/files/baz.txt @@ -0,0 +1 @@ +testing tilde expansion with sudo diff --git a/test/integration/roles/test_sudo/tasks/main.yml b/test/integration/roles/test_sudo/tasks/main.yml new file mode 100644 index 00000000000..022e7d74228 --- /dev/null +++ b/test/integration/roles/test_sudo/tasks/main.yml @@ -0,0 +1,63 @@ +- include_vars: default.yml + +- name: Create test user + user: + name: "{{ sudo_test_user }}" + +- name: tilde expansion honors sudo in file + sudo: True + sudo_user: "{{ sudo_test_user }}" + file: + path: "~/foo.txt" + 
state: touch + +- name: check that the path in the user's home dir was created + stat: + path: "~{{ sudo_test_user }}/foo.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ sudo_test_user }}'" + +- name: tilde expansion honors sudo in template + sudo: True + sudo_user: "{{ sudo_test_user }}" + template: + src: "bar.j2" + dest: "~/bar.txt" + +- name: check that the path in the user's home dir was created + stat: + path: "~{{ sudo_test_user }}/bar.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ sudo_test_user }}'" + +- name: tilde expansion honors sudo in copy + sudo: True + sudo_user: "{{ sudo_test_user }}" + copy: + src: baz.txt + dest: "~/baz.txt" + +- name: check that the path in the user's home dir was created + stat: + path: "~{{ sudo_test_user }}/baz.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ sudo_test_user }}'" + +- name: Remove test user and their home dir + user: + name: "{{ sudo_test_user }}" + state: "absent" + remove: "yes" + diff --git a/test/integration/roles/test_sudo/templates/bar.j2 b/test/integration/roles/test_sudo/templates/bar.j2 new file mode 100644 index 00000000000..6f184d18149 --- /dev/null +++ b/test/integration/roles/test_sudo/templates/bar.j2 @@ -0,0 +1 @@ +{{ sudo_test_user }} diff --git a/test/integration/roles/test_sudo/vars/default.yml b/test/integration/roles/test_sudo/vars/default.yml new file mode 100644 index 00000000000..f2f7b728b28 --- /dev/null +++ b/test/integration/roles/test_sudo/vars/default.yml @@ -0,0 +1 @@ +sudo_test_user: ansibletest1 diff --git a/test/integration/roles/test_template/tasks/main.yml b/test/integration/roles/test_template/tasks/main.yml index 4784dc6ac82..65064012a20 100644 --- a/test/integration/roles/test_template/tasks/main.yml +++ b/test/integration/roles/test_template/tasks/main.yml @@ -27,6 +27,7 @@ - "'group' in template_result" - "'gid' in template_result" - "'md5sum' in template_result" + - "'checksum' in template_result" - "'owner' in template_result" - "'size' in template_result" - "'src' in template_result" @@ -60,7 +61,77 @@ register: file_result - name: ensure file mode did not change - assert: - that: + assert: + that: - "file_result.changed != True" - + +# VERIFY dest as a directory does not break file attributes +# Note: expanduser is needed to go down the particular codepath that was broken before +- name: setup directory for test + file: state=directory dest={{output_dir | expanduser}}/template-dir mode=0755 owner=nobody group=root + +- name: set file mode when the destination is a directory + template: src=foo.j2 dest={{output_dir | expanduser}}/template-dir/ mode=0600 owner=root group=root + +- name: set file mode when the destination is a directory + template: src=foo.j2 dest={{output_dir | expanduser}}/template-dir/ mode=0600 owner=root group=root + register: file_result + +- name: check that the file has the correct attributes + stat: path={{output_dir | expanduser}}/template-dir/foo.j2 + register: file_attrs + +- assert: + that: + - "file_attrs.stat.uid == 0" + - "file_attrs.stat.pw_name == 'root'" + - "file_attrs.stat.mode == '0600'" + +- name: check that the containing directory did not change attributes + stat: path={{output_dir | expanduser}}/template-dir/ + register: dir_attrs + +- assert: + that: + - "dir_attrs.stat.uid != 0" + - "dir_attrs.stat.pw_name == 
'nobody'" + - "dir_attrs.stat.mode == '0755'" + +- name: make a symlink to the templated file + file: + path: '{{ output_dir }}/foo.symlink' + src: '{{ output_dir }}/foo.templated' + state: link + +- name: check that templating the symlink results in the file being templated + template: + src: foo.j2 + dest: '{{output_dir}}/foo.symlink' + mode: 0600 + follow: True + register: template_result + +- assert: + that: + - "template_result.changed == True" + +- name: check that the file has the correct attributes + stat: path={{output_dir | expanduser}}/template-dir/foo.j2 + register: file_attrs + +- assert: + that: + - "file_attrs.stat.mode == '0600'" + +- name: check that templating the symlink again makes no changes + template: + src: foo.j2 + dest: '{{output_dir}}/foo.symlink' + mode: 0600 + follow: True + register: template_result + +- assert: + that: + - "template_result.changed == False" + diff --git a/test/integration/roles/test_unarchive/tasks/main.yml b/test/integration/roles/test_unarchive/tasks/main.yml index 073ccf9145d..3e315a7e94c 100644 --- a/test/integration/roles/test_unarchive/tasks/main.yml +++ b/test/integration/roles/test_unarchive/tasks/main.yml @@ -15,6 +15,15 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make sure we start fresh + +- name: Ensure zip is present to create test archive (yum) + yum: name=zip state=latest + when: ansible_pkg_mgr == 'yum' + +- name: Ensure zip is present to create test archive (apt) + apt: name=zip state=latest + when: ansible_pkg_mgr == 'apt' - name: prep our file copy: src=foo.txt dest={{output_dir}}/foo-unarchive.txt @@ -26,7 +35,7 @@ shell: tar cvf test-unarchive.tar.gz foo-unarchive.txt chdir={{output_dir}} - name: prep a zip file - shell: tar cvf test-unarchive.zip foo-unarchive.txt chdir={{output_dir}} + shell: zip test-unarchive.zip foo-unarchive.txt chdir={{output_dir}} - name: create our tar unarchive destination file: path={{output_dir}}/test-unarchive-tar state=directory @@ -87,6 +96,21 @@ assert: that: - "unarchive02c.changed == false" + - "unarchive02c.skipped == true" + +- name: unarchive a tar.gz file with creates over an existing file using complex_args + unarchive: + src: "{{output_dir}}/test-unarchive.tar.gz" + dest: "{{output_dir | expanduser}}/test-unarchive-tar-gz" + copy: no + creates: "{{output_dir}}/test-unarchive-tar-gz/foo-unarchive.txt" + register: unarchive02d + +- name: verify that the file was not marked as changed + assert: + that: + - "unarchive02d.changed == false" + - "unarchive02d.skipped == true" - name: remove our tar.gz unarchive destination file: path={{output_dir}}/test-unarchive-tar-gz state=absent @@ -106,7 +130,7 @@ - name: verify that the file was unarchived file: path={{output_dir}}/test-unarchive-zip/foo-unarchive.txt state=file -- name: remove our tar unarchive destination +- name: remove our zip unarchive destination file: path={{output_dir}}/test-unarchive-zip state=absent - name: remove our test file for the archive @@ -135,3 +159,71 @@ - name: remove our unarchive destination file: path=/tmp/foo-unarchive.txt state=absent + +- name: create our unarchive destination + file: path={{output_dir}}/test-unarchive-tar-gz state=directory + +- name: unarchive and set mode + unarchive: + src: "{{ output_dir }}/test-unarchive.tar.gz" + dest: "{{ output_dir | expanduser }}/test-unarchive-tar-gz" + copy: no + mode: "u+rwX,g-rwx,o-rwx" + register: unarchive06 + +- name: Test that the file modes were changed + stat: + path: "{{ output_dir | 
expanduser }}/test-unarchive-tar-gz/foo-unarchive.txt" + register: unarchive06_stat + +- name: Test that the file modes were changed + assert: + that: + - "unarchive06.changed == true" + - "unarchive06_stat.stat.mode == '0600'" + +- name: unarchive and set mode + unarchive: + src: "{{ output_dir }}/test-unarchive.tar.gz" + dest: "{{ output_dir | expanduser }}/test-unarchive-tar-gz" + copy: no + mode: "u+rwX,g-rwx,o-rwx" + register: unarchive07 + +- name: Test that the files were not changed + assert: + that: + - "unarchive07.changed == false" + +- name: remove our tar.gz unarchive destination + file: path={{ output_dir }}/test-unarchive-tar-gz state=absent + +- name: create a directory with quotable chars + file: path="{{ output_dir }}/test-quotes~root" state=directory + +- name: unarchive into directory with quotable chars + unarchive: + src: "{{ output_dir }}/test-unarchive.tar.gz" + dest: "{{ output_dir | expanduser }}/test-quotes~root" + copy: no + register: unarchive08 + +- name: Test that unarchive succeeded + assert: + that: + - "unarchive08.changed == true" + +- name: unarchive into directory with quotable chars a second time + unarchive: + src: "{{ output_dir }}/test-unarchive.tar.gz" + dest: "{{ output_dir | expanduser }}/test-quotes~root" + copy: no + register: unarchive09 + +- name: Test that unarchive did nothing + assert: + that: + - "unarchive09.changed == false" + +- name: remove quotable chars test + file: path="{{ output_dir }}/test-quotes~root" state=absent diff --git a/test/integration/roles/test_var_precedence/tasks/main.yml b/test/integration/roles/test_var_precedence/tasks/main.yml index 1915ebdb916..7850e6b6463 100644 --- a/test/integration/roles/test_var_precedence/tasks/main.yml +++ b/test/integration/roles/test_var_precedence/tasks/main.yml @@ -1,3 +1,7 @@ +- debug: var=extra_var +- debug: var=vars_var +- debug: var=vars_files_var +- debug: var=vars_files_var_role - assert: that: - 'extra_var == "extra_var"' diff --git a/test/integration/roles/test_var_precedence_dep/defaults/main.yml b/test/integration/roles/test_var_precedence_dep/defaults/main.yml new file mode 100644 index 00000000000..dda4224c35e --- /dev/null +++ b/test/integration/roles/test_var_precedence_dep/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# should be overridden by vars_files in the main play +vars_files_var: "BAD!" 
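+# (role defaults sit at the very bottom of the variable precedence order,
+# so the play's vars_files value should always win over the placeholder above)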
+# should be seen in role1 (no override) +defaults_file_var_role1: "defaults_file_var_role1" diff --git a/test/integration/roles/test_var_precedence_dep/tasks/main.yml b/test/integration/roles/test_var_precedence_dep/tasks/main.yml new file mode 100644 index 00000000000..2f8e17096bc --- /dev/null +++ b/test/integration/roles/test_var_precedence_dep/tasks/main.yml @@ -0,0 +1,14 @@ +- debug: var=extra_var +- debug: var=param_var +- debug: var=vars_var +- debug: var=vars_files_var +- debug: var=vars_files_var_role +- debug: var=defaults_file_var_role1 +- assert: + that: + - 'extra_var == "extra_var"' + - 'param_var == "param_var_role1"' + - 'vars_var == "vars_var"' + - 'vars_files_var == "vars_files_var"' + - 'vars_files_var_role == "vars_files_var_dep"' + - 'defaults_file_var_role1 == "defaults_file_var_role1"' diff --git a/test/integration/roles/test_var_precedence_dep/vars/main.yml b/test/integration/roles/test_var_precedence_dep/vars/main.yml new file mode 100644 index 00000000000..a69efad537c --- /dev/null +++ b/test/integration/roles/test_var_precedence_dep/vars/main.yml @@ -0,0 +1,4 @@ +--- +# should override the global vars_files_var since it's local to the role +# but will be set to the value in the last role included which defines it +vars_files_var_role: "vars_files_var_dep" diff --git a/test/integration/roles/test_var_precedence_role1/meta/main.yml b/test/integration/roles/test_var_precedence_role1/meta/main.yml new file mode 100644 index 00000000000..c8b410b59c1 --- /dev/null +++ b/test/integration/roles/test_var_precedence_role1/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - test_var_precedence_dep diff --git a/test/integration/roles/test_var_precedence_role1/tasks/main.yml b/test/integration/roles/test_var_precedence_role1/tasks/main.yml index 410e01b5704..95b2a0bb5a7 100644 --- a/test/integration/roles/test_var_precedence_role1/tasks/main.yml +++ b/test/integration/roles/test_var_precedence_role1/tasks/main.yml @@ -10,5 +10,5 @@ - 'param_var == "param_var_role1"' - 'vars_var == "vars_var"' - 'vars_files_var == "vars_files_var"' - - 'vars_files_var_role == "vars_files_var_role3"' + - 'vars_files_var_role == "vars_files_var_role1"' - 'defaults_file_var_role1 == "defaults_file_var_role1"' diff --git a/test/integration/roles/test_var_precedence_role2/tasks/main.yml b/test/integration/roles/test_var_precedence_role2/tasks/main.yml index 96551a8e9c7..a862389cd36 100644 --- a/test/integration/roles/test_var_precedence_role2/tasks/main.yml +++ b/test/integration/roles/test_var_precedence_role2/tasks/main.yml @@ -10,5 +10,5 @@ - 'param_var == "param_var_role2"' - 'vars_var == "vars_var"' - 'vars_files_var == "vars_files_var"' - - 'vars_files_var_role == "vars_files_var_role3"' + - 'vars_files_var_role == "vars_files_var_role2"' - 'defaults_file_var_role2 == "overridden by role vars"' diff --git a/test/integration/roles/test_win_copy/files/foo.txt b/test/integration/roles/test_win_copy/files/foo.txt new file mode 100644 index 00000000000..7c6ded14ecf --- /dev/null +++ b/test/integration/roles/test_win_copy/files/foo.txt @@ -0,0 +1 @@ +foo.txt diff --git a/test/integration/roles/test_win_copy/files/subdir/bar.txt b/test/integration/roles/test_win_copy/files/subdir/bar.txt new file mode 100644 index 00000000000..76018072e09 --- /dev/null +++ b/test/integration/roles/test_win_copy/files/subdir/bar.txt @@ -0,0 +1 @@ +baz diff --git a/test/integration/roles/test_win_copy/files/subdir/subdir2/baz.txt b/test/integration/roles/test_win_copy/files/subdir/subdir2/baz.txt new file mode 100644 
index 00000000000..76018072e09 --- /dev/null +++ b/test/integration/roles/test_win_copy/files/subdir/subdir2/baz.txt @@ -0,0 +1 @@ +baz diff --git a/test/integration/roles/test_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt b/test/integration/roles/test_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt new file mode 100644 index 00000000000..78df5b06bd3 --- /dev/null +++ b/test/integration/roles/test_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt @@ -0,0 +1 @@ +qux \ No newline at end of file diff --git a/test/integration/roles/test_win_copy/meta/main.yml b/test/integration/roles/test_win_copy/meta/main.yml new file mode 100644 index 00000000000..55200b3fc64 --- /dev/null +++ b/test/integration/roles/test_win_copy/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_win_tests + diff --git a/test/integration/roles/test_win_copy/tasks/main.yml b/test/integration/roles/test_win_copy/tasks/main.yml new file mode 100644 index 00000000000..d898219a85c --- /dev/null +++ b/test/integration/roles/test_win_copy/tasks/main.yml @@ -0,0 +1,261 @@ +# test code for the copy module and action plugin +# (c) 2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
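+# These checks mirror the POSIX copy tests; anything that leans on modes,
+# ownership, content= or symlinks stays commented out below because win_copy
+# does not support those parameters yet, and the checksums asserted here are
+# the sha1 values used across the rest of the suite.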
+
+- name: record the output directory
+  set_fact: output_file={{win_output_dir}}/foo.txt
+
+- name: initiate a basic copy
+#- name: initiate a basic copy, and also test the mode
+#  win_copy: src=foo.txt dest={{output_file}} mode=0444
+  win_copy: src=foo.txt dest={{output_file}}
+  register: copy_result
+
+- debug: var=copy_result
+
+- name: check the presence of the output file
+#- name: check the mode of the output file
+  win_file: name={{output_file}} state=file
+  register: file_result_check
+
+- debug: var=file_result_check
+
+
+#- name: assert the mode is correct
+#  assert:
+#    that:
+#      - "file_result_check.mode == '0444'"
+
+- name: assert basic copy worked
+  assert:
+    that:
+      - "'changed' in copy_result"
+#      - "'dest' in copy_result"
+#      - "'group' in copy_result"
+#      - "'gid' in copy_result"
+      - "'checksum' in copy_result"
+#      - "'owner' in copy_result"
+#      - "'size' in copy_result"
+#      - "'src' in copy_result"
+#      - "'state' in copy_result"
+#      - "'uid' in copy_result"
+
+- name: verify that the file was marked as changed
+  assert:
+    that:
+      - "copy_result.changed == true"
+
+- name: verify that the file checksum is correct
+  assert:
+    that:
+      - "copy_result.checksum[0] == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'"
+
+- name: check the stat results of the file
+  win_stat: path={{output_file}}
+  register: stat_results
+
+- debug: var=stat_results
+
+- name: assert the stat results are correct
+  assert:
+    that:
+      - "stat_results.stat.exists == true"
+#      - "stat_results.stat.isblk == false"
+#      - "stat_results.stat.isfifo == false"
+#      - "stat_results.stat.isreg == true"
+#      - "stat_results.stat.issock == false"
+      - "stat_results.stat.checksum[0] == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'"
+
+- name: overwrite the file via same means
+  win_copy: src=foo.txt dest={{output_file}}
+  register: copy_result2
+
+- name: assert that the file was not changed
+  assert:
+    that:
+      - "not copy_result2|changed"
+
+# content system not available in win_copy right now
+#- name: overwrite the file using the content system
+#  win_copy: content="modified" dest={{output_file}}
+#  register: copy_result3
+#
+#- name: assert that the file has changed
+#  assert:
+#    that:
+#      - "copy_result3|changed"
+#      - "'content' not in copy_result3"
+
+# test recursive copy
+
+- name: set the output subdirectory
+  set_fact: output_subdir={{win_output_dir}}/sub/
+
+- name: make an output subdirectory
+  win_file: name={{output_subdir}} state=directory
+
+- name: test recursive copy to directory
+#  win_copy: src=subdir dest={{output_subdir}} directory_mode=0700
+  win_copy: src=subdir dest={{output_subdir}}
+  register: recursive_copy_result
+
+- debug: var=recursive_copy_result
+
+- name: check that a file in a directory was transferred
+  win_stat: path={{win_output_dir}}/sub/subdir/bar.txt
+  register: stat_bar
+
+- name: check that a file in a deeper directory was transferred
+  win_stat: path={{win_output_dir}}/sub/subdir/subdir2/baz.txt
+  register: stat_bar2
+
+- name: check that a file in a directory whose parent contains a directory alone was transferred
+  win_stat: path={{win_output_dir}}/sub/subdir/subdir2/subdir3/subdir4/qux.txt
+  register: stat_bar3
+
+- name: assert recursive copy things
+  assert:
+    that:
+      - "stat_bar.stat.exists"
+      - "stat_bar2.stat.exists"
+      - "stat_bar3.stat.exists"
+
+- name: stat the recursively copied directories
+  win_stat: path={{win_output_dir}}/sub/{{item}}
+  register: dir_stats
+  with_items:
+    - "subdir"
+    - "subdir/subdir2"
+    - "subdir/subdir2/subdir3"
+    - "subdir/subdir2/subdir3/subdir4"
+
+# can't check file mode on windows so commenting this one out.
+#- name: assert recursive copied directories mode
+#  assert:
+#    that:
+#      - "{{item.stat.mode}} == 0700"
+#  with_items: dir_stats.results
+
+
+# errors on this aren't presently ignored so this test is commented out. But it would be nice to fix.
+#
+
+# content param not available in win_copy
+#- name: overwrite the file again using the content system, also passing along file params
+#  win_copy: content="modified" dest={{output_file}}
+#  register: copy_result4
+
+#- name: assert invalid copy input location fails
+#  win_copy: src=invalid_file_location_does_not_exist dest={{win_output_dir}}/file.txt
+#  ignore_errors: True
+#  register: failed_copy
+
+# owner not available in win_copy, commenting out
+#- name: copy already copied directory again
+#  win_copy: src=subdir dest={{output_subdir | expanduser}} owner={{ansible_ssh_user}}
+#  register: copy_result5
+
+#- name: assert that the directory was not changed
+#  assert:
+#    that:
+#      - "not copy_result5|changed"
+
+# content not available in win_copy, commenting out.
+# issue 8394
+#- name: create a file with content and a literal multiline block
+#  win_copy: |
+#    content='this is the first line
+#    this is the second line
+#
+#    this line is after an empty line
+#    this line is the last line
+#    '
+#    dest={{win_output_dir}}/multiline.txt
+#  register: copy_result6
+
+#- debug: var=copy_result6
+
+#- name: assert the multiline file was created correctly
+#  assert:
+#    that:
+#      - "copy_result6.changed"
+#      - "copy_result6.dest == '{{win_output_dir|expanduser}}/multiline.txt'"
+#      - "copy_result6.checksum == '1627d51e7e607c92cf1a502bf0c6cce3'"
+
+# test overwriting a file as an unprivileged user (pull request #8624)
+# this can't be relative to {{win_output_dir}} as ~root usually has mode 700
+
+#- name: create world writable directory
+#  win_file: dest=/tmp/worldwritable state=directory mode=0777
+
+#- name: create world writable file
+#  win_copy: dest=/tmp/worldwritable/file.txt content="bar" mode=0666
+
+#- name: overwrite the file as user nobody
+#  win_copy: dest=/tmp/worldwritable/file.txt content="baz"
+#  sudo: yes
+#  sudo_user: nobody
+#  register: copy_result7
+
+#- name: assert the file was overwritten
+#  assert:
+#    that:
+#      - "copy_result7.changed"
+#      - "copy_result7.dest == '/tmp/worldwritable/file.txt'"
+#      - "copy_result7.checksum == '73feffa4b7f6bb68e44cf984c85f6e88'"
+
+#- name: clean up
+#  win_file: dest=/tmp/worldwritable state=absent
+
+# test overwriting a link using "follow=yes" so that the link
+# is preserved and the link target is updated
+
+#- name: create a test file to symlink to
+#  win_copy: dest={{win_output_dir}}/follow_test content="this is the follow test file\n"
+#
+#- name: create a symlink to the test file
+#  win_file: path={{win_output_dir}}/follow_link src='./follow_test' state=link
+#
+#- name: update the test file using follow=True to preserve the link
+#  win_copy: dest={{win_output_dir}}/follow_link content="this is the new content\n" follow=yes
+#  register: replace_follow_result
+
+#- name: stat the link path
+#  win_stat: path={{win_output_dir}}/follow_link
+#  register: stat_link_result
+#
+#- name: assert that the link is still a link
+#  assert:
+#    that:
+#      - stat_link_result.stat.islnk
+#
+#- name: get the checksum of the link target
+#  shell: checksum {{win_output_dir}}/follow_test | cut -f1 -sd ' '
+#  register: target_file_result
+
+#- name: assert that the link target was updated
+#  assert:
+#    that:
+#      - replace_follow_result.checksum == 
target_file_result.stdout + +- name: clean up sub + win_file: path={{win_output_dir}}/sub state=absent + +- name: clean up foo.txt + win_file: path={{win_output_dir}}/foo.txt state=absent + + diff --git a/test/integration/roles/test_win_feature/defaults/main.yml b/test/integration/roles/test_win_feature/defaults/main.yml new file mode 100644 index 00000000000..e1833cd8a84 --- /dev/null +++ b/test/integration/roles/test_win_feature/defaults/main.yml @@ -0,0 +1,4 @@ +--- + +# Feature not normally installed by default. +test_win_feature_name: Telnet-Client diff --git a/test/integration/roles/test_win_feature/tasks/main.yml b/test/integration/roles/test_win_feature/tasks/main.yml new file mode 100644 index 00000000000..a49622c232d --- /dev/null +++ b/test/integration/roles/test_win_feature/tasks/main.yml @@ -0,0 +1,131 @@ +# test code for the win_feature module +# (c) 2014, Chris Church + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +- name: start with feature absent + win_feature: + name: "{{ test_win_feature_name }}" + state: absent + +- name: install feature + win_feature: + name: "{{ test_win_feature_name }}" + state: present + restart: no + include_sub_features: yes + include_management_tools: yes + register: win_feature_install_result + +- name: check result of installing feature + assert: + that: + - "win_feature_install_result|changed" + - "win_feature_install_result.success" + - "win_feature_install_result.exitcode == 'Success'" + - "not win_feature_install_result.restart_needed" + - "win_feature_install_result.feature_result|length == 1" + - "win_feature_install_result.feature_result[0].id" + - "win_feature_install_result.feature_result[0].display_name" + - "win_feature_install_result.feature_result[0].message is defined" + - "win_feature_install_result.feature_result[0].restart_needed is defined" + - "win_feature_install_result.feature_result[0].skip_reason" + - "win_feature_install_result.feature_result[0].success is defined" + +- name: install feature again + win_feature: + name: "{{ test_win_feature_name }}" + state: present + restart: no + include_sub_features: yes + include_management_tools: yes + register: win_feature_install_again_result + +- name: check result of installing feature again + assert: + that: + - "not win_feature_install_again_result|changed" + - "win_feature_install_again_result.success" + - "win_feature_install_again_result.exitcode == 'NoChangeNeeded'" + - "not win_feature_install_again_result.restart_needed" + - "win_feature_install_again_result.feature_result == []" + +- name: remove feature + win_feature: + name: "{{ test_win_feature_name }}" + state: absent + register: win_feature_remove_result + +- name: check result of removing feature + assert: + that: + - "win_feature_remove_result|changed" + - "win_feature_remove_result.success" + - "win_feature_remove_result.exitcode == 'Success'" + - "not win_feature_remove_result.restart_needed" + - 
"win_feature_remove_result.feature_result|length == 1" + - "win_feature_remove_result.feature_result[0].id" + - "win_feature_remove_result.feature_result[0].display_name" + - "win_feature_remove_result.feature_result[0].message is defined" + - "win_feature_remove_result.feature_result[0].restart_needed is defined" + - "win_feature_remove_result.feature_result[0].skip_reason" + - "win_feature_remove_result.feature_result[0].success is defined" + +- name: remove feature again + win_feature: + name: "{{ test_win_feature_name }}" + state: absent + register: win_feature_remove_again_result + +- name: check result of removing feature again + assert: + that: + - "not win_feature_remove_again_result|changed" + - "win_feature_remove_again_result.success" + - "win_feature_remove_again_result.exitcode == 'NoChangeNeeded'" + - "not win_feature_remove_again_result.restart_needed" + - "win_feature_remove_again_result.feature_result == []" + +- name: try to install an invalid feature name + win_feature: + name: "Microsoft-Bob" + state: present + register: win_feature_install_invalid_result + ignore_errors: true + +- name: check result of installing invalid feature name + assert: + that: + - "win_feature_install_invalid_result|failed" + - "not win_feature_install_invalid_result|changed" + - "win_feature_install_invalid_result.msg" + - "win_feature_install_invalid_result.exitcode == 'InvalidArgs'" + +- name: try to remove an invalid feature name + win_feature: + name: "Microsoft-Bob" + state: absent + register: win_feature_remove_invalid_result + ignore_errors: true + +- name: check result of removing invalid feature name + assert: + that: + - "win_feature_remove_invalid_result|failed" + - "not win_feature_remove_invalid_result|changed" + - "win_feature_remove_invalid_result.msg" + - "win_feature_remove_invalid_result.exitcode == 'InvalidArgs'" diff --git a/test/integration/roles/test_win_fetch/tasks/main.yml b/test/integration/roles/test_win_fetch/tasks/main.yml index b07b681bdd1..8c0f5aa21fa 100644 --- a/test/integration/roles/test_win_fetch/tasks/main.yml +++ b/test/integration/roles/test_win_fetch/tasks/main.yml @@ -18,11 +18,11 @@ - name: clean out the test directory local_action: file name={{ output_dir|mandatory }} state=absent - tags: me + run_once: true - name: create the test directory local_action: file name={{ output_dir }} state=directory - tags: me + run_once: true - name: fetch a small file fetch: src="C:/Windows/win.ini" dest={{ output_dir }} @@ -145,7 +145,7 @@ - "not fetch_missing_nofail|changed" - name: attempt to fetch a non-existent file - fail on missing - fetch: src="C:/this_file_should_not_exist.txt" dest={{ output_dir }} fail_on_missing=yes + fetch: src="~/this_file_should_not_exist.txt" dest={{ output_dir }} fail_on_missing=yes register: fetch_missing ignore_errors: true @@ -164,5 +164,6 @@ - name: check fetch directory result assert: that: - - "fetch_dir|failed" + # Doesn't fail anymore, only returns a message. 
+ - "not fetch_dir|changed" - "fetch_dir.msg" diff --git a/test/integration/roles/test_win_file/files/foo.txt b/test/integration/roles/test_win_file/files/foo.txt new file mode 100644 index 00000000000..7c6ded14ecf --- /dev/null +++ b/test/integration/roles/test_win_file/files/foo.txt @@ -0,0 +1 @@ +foo.txt diff --git a/test/integration/roles/test_win_file/files/foobar/directory/fileC b/test/integration/roles/test_win_file/files/foobar/directory/fileC new file mode 100644 index 00000000000..e69de29bb2d diff --git a/test/integration/roles/test_win_file/files/foobar/directory/fileD b/test/integration/roles/test_win_file/files/foobar/directory/fileD new file mode 100644 index 00000000000..e69de29bb2d diff --git a/test/integration/roles/test_win_file/files/foobar/fileA b/test/integration/roles/test_win_file/files/foobar/fileA new file mode 100644 index 00000000000..ab47708c98a --- /dev/null +++ b/test/integration/roles/test_win_file/files/foobar/fileA @@ -0,0 +1 @@ +fileA diff --git a/test/integration/roles/test_win_file/files/foobar/fileB b/test/integration/roles/test_win_file/files/foobar/fileB new file mode 100644 index 00000000000..e69de29bb2d diff --git a/test/integration/roles/test_win_file/meta/main.yml b/test/integration/roles/test_win_file/meta/main.yml new file mode 100644 index 00000000000..55200b3fc64 --- /dev/null +++ b/test/integration/roles/test_win_file/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_win_tests + diff --git a/test/integration/roles/test_win_file/tasks/main.yml b/test/integration/roles/test_win_file/tasks/main.yml new file mode 100644 index 00000000000..35ecfb63874 --- /dev/null +++ b/test/integration/roles/test_win_file/tasks/main.yml @@ -0,0 +1,421 @@ +# Test code for the file module. +# (c) 2014, Richard Isaacson + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+
+- set_fact: output_file={{win_output_dir}}\\foo.txt
+
+- name: prep with a basic win copy
+  win_copy: src=foo.txt dest={{output_file}}
+
+- name: verify that we are checking a file and it is present
+  win_file: path={{output_file}} state=file
+  register: file_result
+
+- name: verify that the file was not marked as changed
+  assert:
+    that:
+      - "file_result.changed == false"
+#      - "file_result.state == 'file'"
+
+- name: verify that we are checking an absent file
+  win_file: path={{win_output_dir}}\bar.txt state=absent
+  register: file2_result
+
+- name: verify that the file was not marked as changed
+  assert:
+    that:
+      - "file2_result.changed == false"
+#      - "file2_result.state == 'absent'"
+
+- name: verify we can touch a file
+  win_file: path={{win_output_dir}}\baz.txt state=touch
+  register: file3_result
+
+- name: verify that the file was marked as changed
+  assert:
+    that:
+      - "file3_result.changed == true"
+#      - "file3_result.state == 'file'"
+#      - "file3_result.mode == '0644'"
+
+#- name: change file mode
+#  win_file: path={{win_output_dir}}/baz.txt mode=0600
+#  register: file4_result
+
+#- name: verify that the file was marked as changed
+#  assert:
+#    that:
+#      - "file4_result.changed == true"
+#      - "file4_result.mode == '0600'"
+#
+#- name: change ownership and group
+#  win_file: path={{win_output_dir}}/baz.txt owner=1234 group=1234
+#
+#- name: setup a tmp-like directory for ownership test
+#  win_file: path=/tmp/worldwritable mode=1777 state=directory
+
+#- name: Ask to create a file without enough perms to change ownership
+#  win_file: path=/tmp/worldwritable/baz.txt state=touch owner=root
+#  sudo: yes
+#  sudo_user: nobody
+#  register: chown_result
+#  ignore_errors: True
+
+#- name: Ask whether the new file exists
+#  win_stat: path=/tmp/worldwritable/baz.txt
+#  register: file_exists_result
+
+#- name: Verify that the file doesn't exist on failure
+#  assert:
+#    that:
+#      - "chown_result.failed == True"
+#      - "file_exists_result.stat.exists == False"
+#
+- name: clean up
+  win_file: path=/tmp/worldwritable state=absent
+
+#- name: create soft link to file
+#  win_file: src={{output_file}} dest={{win_output_dir}}/soft.txt state=link
+#  register: file5_result
+
+#- name: verify that the file was marked as changed
+#  assert:
+#    that:
+#      - "file5_result.changed == true"
+#
+#- name: create hard link to file
+#  win_file: src={{output_file}} dest={{win_output_dir}}/hard.txt state=hard
+#  register: file6_result
+#
+#- name: verify that the file was marked as changed
+#  assert:
+#    that:
+#      - "file6_result.changed == true"
+#
+- name: create a directory
+  win_file: path={{win_output_dir}}\foobar state=directory
+  register: file7_result
+
+- debug: var=file7_result
+
+- name: verify that the directory was marked as changed
+  assert:
+    that:
+      - "file7_result.changed == true"
+#      - "file7_result.state == 'directory'"
+
+# windows and selinux unlikely to ever mix, removing these tests:
+#- name: determine if selinux is installed
+#  shell: which getenforce || exit 0
+#  register: selinux_installed
+
+#- name: determine if selinux is enabled
+#  shell: getenforce
+#  register: selinux_enabled
+#  when: selinux_installed.stdout != ""
+#  ignore_errors: true
+
+#- name: decide to include or not include selinux tests
+#  include: selinux_tests.yml
+#  when: selinux_installed.stdout != "" and selinux_enabled.stdout != "Disabled"
+
+- name: remove directory foobar
+  win_file: path={{win_output_dir}}\foobar state=absent
+
+- name: remove file foo.txt
+  win_file: path={{win_output_dir}}\foo.txt state=absent
+
+- name: remove file bar.txt
+  win_file: path={{win_output_dir}}\bar.txt state=absent
+
+- name: remove file baz.txt
+  win_file: path={{win_output_dir}}\baz.txt state=absent
+
+- name: win copy directory structure over
+  win_copy: src=foobar dest={{win_output_dir}}
+
+- name: remove directory foobar
+  win_file: path={{win_output_dir}}\foobar state=absent
+  register: file14_result
+
+- debug: var=file14_result
+
+- name: verify that the directory was removed
+  assert:
+    that:
+      - 'file14_result.changed == true'
+#      - 'file14_result.state == "absent"'
+
+- name: create a test sub-directory
+  win_file: dest={{win_output_dir}}/sub1 state=directory
+  register: file15_result
+
+- name: verify that the new directory was created
+  assert:
+    that:
+      - 'file15_result.changed == true'
+#      - 'file15_result.state == "directory"'
+
+- name: create test files in the sub-directory
+  win_file: dest={{win_output_dir}}/sub1/{{item}} state=touch
+  with_items:
+    - file1
+    - file2
+    - file3
+  register: file16_result
+
+- name: verify the files were created
+  assert:
+    that:
+      - 'item.changed == true'
+#      - 'item.state == "file"'
+  with_items: file16_result.results
+
+#- name: try to force the sub-directory to a link
+#  win_file: src={{win_output_dir}}/testing dest={{win_output_dir}}/sub1 state=link force=yes
+#  register: file17_result
+#  ignore_errors: true
+
+#- name: verify the directory was not replaced with a link
+#  assert:
+#    that:
+#      - 'file17_result.failed == true'
+#      - 'file17_result.state == "directory"'
+
+#- name: create soft link to directory using absolute path
+#  win_file: src=/ dest={{win_output_dir}}/root state=link
+#  register: file18_result
+#
+#- name: verify that the result was marked as changed
+#  assert:
+#    that:
+#      - "file18_result.changed == true"
+#
+- name: create another test sub-directory
+  win_file: dest={{win_output_dir}}/sub2 state=directory
+  register: file19_result
+
+- name: verify that the new directory was created
+  assert:
+    that:
+      - 'file19_result.changed == true'
+#      - 'file19_result.state == "directory"'
+
+#- name: create soft link to relative file
+#  win_file: src=../sub1/file1 dest={{win_output_dir}}/sub2/link1 state=link
+#  register: file20_result
+#
+#- name: verify that the result was marked as changed
+#  assert:
+#    that:
+#      - "file20_result.changed == true"
+
+#- name: create soft link to relative directory
+#  win_file: src=sub1 dest={{win_output_dir}}/sub1-link state=link
+#  register: file21_result
+#
+#- name: verify that the result was marked as changed
+#  assert:
+#    that:
+#      - "file21_result.changed == true"
+#
+#- name: test file creation with symbolic mode
+#  win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=u=rwx,g=rwx,o=rwx
+#  register: result
+#
+#- name: assert file mode
+#  assert:
+#    that:
+#      - result.mode == '0777'
+
+#- name: modify symbolic mode for all
+#  win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=a=r
+#  register: result
+#
+#- name: assert file mode
+#  assert:
+#    that:
+#      - result.mode == '0444'
+
+#- name: modify symbolic mode for owner
+#  win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=u+w
+#  register: result
+#
+#- name: assert file mode
+#  assert:
+#    that:
+#      - result.mode == '0644'
+
+#- name: modify symbolic mode for group
+#  win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=g+w
+#  register: result
+#
+#- name: assert file mode
+#  assert:
+#    that:
+#      - result.mode == '0664'
+#
+#- name: modify symbolic mode for world
+#  win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=o+w
+# 
register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0666' +# +#- name: modify symbolic mode for owner +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=u+x +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0766' +## +#- name: modify symbolic mode for group +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=g+x +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0776' +# +#- name: modify symbolic mode for world +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=o+x +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0777' + +#- name: remove symbolic mode for world +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=o-wx +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0774' +# +#- name: remove symbolic mode for group +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=g-wx +# register: result +# +#- name: assert file mode +### assert: +# that: +# - result.mode == '0744' + +#- name: remove symbolic mode for owner +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=u-wx +# register: result + +#- name: assert file mode +# assert: +# that: +# - result.mode == '0444' +# +#- name: set sticky bit with symbolic mode +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=o+t +# register: result + +#- name: assert file mode +# assert: +# that: +# - result.mode == '01444' +# +#- name: remove sticky bit with symbolic mode +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=o-t +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0444' + +#- name: add setgid with symbolic mode +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=g+s +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '02444' +# +#- name: remove setgid with symbolic mode +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=g-s +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0444' + +#- name: add setuid with symbolic mode +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=u+s +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '04444' + +#- name: remove setuid with symbolic mode +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=u-s +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0444' + +# test the file module using follow=yes, so that the target of a +# symlink is modified, rather than the link itself + +#- name: create a test file +# win_copy: dest={{win_output_dir}}\test_follow content="this is a test file\n" mode=0666 + +#- name: create a symlink to the test file +# win_file: path={{win_output_dir}}\test_follow_link src="./test_follow" state=link +# +#- name: modify the permissions on the link using follow=yes +# win_file: path={{win_output_dir}}\test_follow_link mode=0644 follow=yes +# register: result + +#- name: assert that the chmod worked +# assert: +# that: +# - result.changed +# +#- name: stat the link target +# win_stat: path={{win_output_dir}}/test_follow +# register: result +# +#- name: assert that the link target was modified correctly +# assert: +# that: +## - result.stat.mode == '0644' + +- name: clean up sub1 + win_file: 
path={{win_output_dir}}/sub1 state=absent + +- name: clean up sub2 + win_file: path={{win_output_dir}}/sub2 state=absent + diff --git a/test/integration/roles/test_win_script/defaults/main.yml b/test/integration/roles/test_win_script/defaults/main.yml new file mode 100644 index 00000000000..a2c6475e751 --- /dev/null +++ b/test/integration/roles/test_win_script/defaults/main.yml @@ -0,0 +1,5 @@ +--- + +# Parameters to pass to test scripts. +test_win_script_value: VaLuE +test_win_script_splat: "@{This='THIS'; That='THAT'; Other='OTHER'}" diff --git a/test/integration/roles/test_win_script/files/test_script_with_splatting.ps1 b/test/integration/roles/test_win_script/files/test_script_with_splatting.ps1 new file mode 100644 index 00000000000..429a9a3b7a1 --- /dev/null +++ b/test/integration/roles/test_win_script/files/test_script_with_splatting.ps1 @@ -0,0 +1,6 @@ +# Test script to make sure the Ansible script module works when arguments are +# passed via splatting (http://technet.microsoft.com/en-us/magazine/gg675931.aspx) + +Write-Host $args.This +Write-Host $args.That +Write-Host $args.Other diff --git a/test/integration/roles/test_win_script/tasks/main.yml b/test/integration/roles/test_win_script/tasks/main.yml index 1edfd0b006d..e1e5f25611d 100644 --- a/test/integration/roles/test_win_script/tasks/main.yml +++ b/test/integration/roles/test_win_script/tasks/main.yml @@ -46,6 +46,38 @@ - "not test_script_with_args_result|failed" - "test_script_with_args_result|changed" +- name: run test script that takes parameters passed via splatting + script: test_script_with_splatting.ps1 "@{ This = 'this'; That = '{{ test_win_script_value }}'; Other = 'other'}" + register: test_script_with_splatting_result + +- name: check that script ran and received parameters via splatting + assert: + that: + - "test_script_with_splatting_result.rc == 0" + - "test_script_with_splatting_result.stdout" + - "test_script_with_splatting_result.stdout_lines[0] == 'this'" + - "test_script_with_splatting_result.stdout_lines[1] == test_win_script_value" + - "test_script_with_splatting_result.stdout_lines[2] == 'other'" + - "not test_script_with_splatting_result.stderr" + - "not test_script_with_splatting_result|failed" + - "test_script_with_splatting_result|changed" + +- name: run test script that takes splatted parameters from a variable + script: test_script_with_splatting.ps1 {{ test_win_script_splat|quote }} + register: test_script_with_splatting2_result + +- name: check that script ran and received parameters via splatting from a variable + assert: + that: + - "test_script_with_splatting2_result.rc == 0" + - "test_script_with_splatting2_result.stdout" + - "test_script_with_splatting2_result.stdout_lines[0] == 'THIS'" + - "test_script_with_splatting2_result.stdout_lines[1] == 'THAT'" + - "test_script_with_splatting2_result.stdout_lines[2] == 'OTHER'" + - "not test_script_with_splatting2_result.stderr" + - "not test_script_with_splatting2_result|failed" + - "test_script_with_splatting2_result|changed" + - name: run test script that has errors script: test_script_with_errors.ps1 register: test_script_with_errors_result diff --git a/test/integration/roles/test_win_stat/tasks/main.yml b/test/integration/roles/test_win_stat/tasks/main.yml index a526976ec9c..5069f51a801 100644 --- a/test/integration/roles/test_win_stat/tasks/main.yml +++ b/test/integration/roles/test_win_stat/tasks/main.yml @@ -72,7 +72,7 @@ register: win_stat_no_args ignore_errors: true -- name: check win_stat result witn no path argument +- name: check 
win_stat result with no path argument assert: that: - "win_stat_no_args|failed" diff --git a/test/integration/roles/test_win_template/files/foo.txt b/test/integration/roles/test_win_template/files/foo.txt new file mode 100644 index 00000000000..3e96db9b3ec --- /dev/null +++ b/test/integration/roles/test_win_template/files/foo.txt @@ -0,0 +1 @@ +templated_var_loaded diff --git a/test/integration/roles/test_win_template/meta/main.yml b/test/integration/roles/test_win_template/meta/main.yml new file mode 100644 index 00000000000..55200b3fc64 --- /dev/null +++ b/test/integration/roles/test_win_template/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_win_tests + diff --git a/test/integration/roles/test_win_template/tasks/main.yml b/test/integration/roles/test_win_template/tasks/main.yml new file mode 100644 index 00000000000..9c2ea920ffa --- /dev/null +++ b/test/integration/roles/test_win_template/tasks/main.yml @@ -0,0 +1,103 @@ +# test code for the template module +# (c) 2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- name: fill in a basic template +# win_template: src=foo.j2 dest={{win_output_dir}}/foo.templated mode=0644 + win_template: src=foo.j2 dest={{win_output_dir}}/foo.templated + register: template_result + +- assert: + that: + - "'changed' in template_result" +# - "'dest' in template_result" +# - "'group' in template_result" +# - "'gid' in template_result" +# - "'checksum' in template_result" +# - "'owner' in template_result" +# - "'size' in template_result" +# - "'src' in template_result" +# - "'state' in template_result" +# - "'uid' in template_result" + +- name: verify that the file was marked as changed + assert: + that: + - "template_result.changed == true" + +# VERIFY CONTENTS + +- name: copy known good into place + win_copy: src=foo.txt dest={{win_output_dir}}\foo.txt + +- name: compare templated file to known good + raw: fc.exe {{win_output_dir}}\foo.templated {{win_output_dir}}\foo.txt + register: diff_result + +- debug: var=diff_result + +- name: verify templated file matches known good + assert: + that: +# - 'diff_result.stdout == ""' + - 'diff_result.stdout_lines[1] == "FC: no differences encountered"' + - "diff_result.rc == 0" + +# VERIFY MODE +# can't set file mode on windows so commenting this test out +#- name: set file mode +# win_file: path={{win_output_dir}}/foo.templated mode=0644 +# register: file_result + +#- name: ensure file mode did not change +# assert: +# that: +# - "file_result.changed != True" + +# commenting out all the following tests as expanduser and file modes not windows concepts. 
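+# (in the fc.exe comparison above, stdout_lines[0] is the "Comparing files"
+# banner, so the assert keys on stdout_lines[1] and on rc 0, which fc.exe
+# returns only when the files are identical)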
+ +# VERIFY dest as a directory does not break file attributes +# Note: expanduser is needed to go down the particular codepath that was broken before +#- name: setup directory for test +# win_file: state=directory dest={{win_output_dir | expanduser}}/template-dir mode=0755 owner=nobody group=root + +#- name: set file mode when the destination is a directory +# win_template: src=foo.j2 dest={{win_output_dir | expanduser}}/template-dir/ mode=0600 owner=root group=root + +#- name: set file mode when the destination is a directory +# win_template: src=foo.j2 dest={{win_output_dir | expanduser}}/template-dir/ mode=0600 owner=root group=root +# register: file_result +# +#- name: check that the file has the correct attributes +# win_stat: path={{win_output_dir | expanduser}}/template-dir/foo.j2 +# register: file_attrs +# +#- assert: +# that: +# - "file_attrs.stat.uid == 0" +# - "file_attrs.stat.pw_name == 'root'" +# - "file_attrs.stat.mode == '0600'" +# +#- name: check that the containing directory did not change attributes +# win_stat: path={{win_output_dir | expanduser}}/template-dir/ +# register: dir_attrs +# +#- assert: +# that: +# - "dir_attrs.stat.uid != 0" +# - "dir_attrs.stat.pw_name == 'nobody'" +# - "dir_attrs.stat.mode == '0755'" diff --git a/test/integration/roles/test_win_template/templates/foo.j2 b/test/integration/roles/test_win_template/templates/foo.j2 new file mode 100644 index 00000000000..55aab8f1ea1 --- /dev/null +++ b/test/integration/roles/test_win_template/templates/foo.j2 @@ -0,0 +1 @@ +{{ templated_var }} diff --git a/test/integration/roles/test_win_template/vars/main.yml b/test/integration/roles/test_win_template/vars/main.yml new file mode 100644 index 00000000000..1e8f64ccf44 --- /dev/null +++ b/test/integration/roles/test_win_template/vars/main.yml @@ -0,0 +1 @@ +templated_var: templated_var_loaded diff --git a/test/integration/test_delegate_to.yml b/test/integration/test_delegate_to.yml new file mode 100644 index 00000000000..4ffac5568f0 --- /dev/null +++ b/test/integration/test_delegate_to.yml @@ -0,0 +1,50 @@ +- hosts: testhost3 + roles: + - { role: prepare_tests } + vars: + - template_role: ./roles/test_template + - templated_var: foo + tasks: + - name: Test no delegate_to + setup: + register: setup_results + + - assert: + that: + - '"127.0.0.3" in setup_results.ansible_facts.ansible_env["SSH_CONNECTION"]' + + - name: Test delegate_to with host in inventory + setup: + register: setup_results + delegate_to: testhost4 + + - assert: + that: + - '"127.0.0.4" in setup_results.ansible_facts.ansible_env["SSH_CONNECTION"]' + + - name: Test delegate_to with host not in inventory + setup: + register: setup_results + delegate_to: 127.0.0.254 + + - assert: + that: + - '"127.0.0.254" in setup_results.ansible_facts.ansible_env["SSH_CONNECTION"]' +# +# Smoketest some other modules do not error as a canary +# + - name: Test file works with delegate_to and a host in inventory + file: path={{ output_dir }}/foo.txt mode=0644 state=touch + delegate_to: testhost4 + + - name: Test file works with delegate_to and a host not in inventory + file: path={{ output_dir }}/test_follow_link mode=0644 state=touch + delegate_to: 127.0.0.254 + + - name: Test template works with delegate_to and a host in inventory + template: src={{ template_role }}/templates/foo.j2 dest={{ output_dir }}/foo.txt + delegate_to: testhost4 + + - name: Test template works with delegate_to and a host not in inventory + template: src={{ template_role }}/templates/foo.j2 dest={{ output_dir }}/foo.txt + delegate_to: 
127.0.0.254 diff --git a/test/integration/test_group_by.yml b/test/integration/test_group_by.yml index 6385c1f5ad7..0f4ff413879 100644 --- a/test/integration/test_group_by.yml +++ b/test/integration/test_group_by.yml @@ -92,7 +92,7 @@ - name: set a fact to check that we ran this play set_fact: genus_LAMA=true -- hosts: '{{genus' +- hosts: 'genus' gather_facts: false tasks: - name: no hosts should match this group diff --git a/test/integration/test_var_precedence.yml b/test/integration/test_var_precedence.yml index bbe89a872cf..8bddfff4473 100644 --- a/test/integration/test_var_precedence.yml +++ b/test/integration/test_var_precedence.yml @@ -4,6 +4,8 @@ - vars_var: "vars_var" - param_var: "BAD!" - vars_files_var: "BAD!" + - extra_var_override_once_removed: "{{ extra_var_override }}" + - from_inventory_once_removed: "{{ inven_var | default('BAD!') }}" vars_files: - vars/test_var_precedence.yml roles: @@ -15,17 +17,22 @@ - name: use set_fact to override the registered_var set_fact: registered_var="this is from set_fact" - debug: var=extra_var + - debug: var=extra_var_override_once_removed - debug: var=vars_var - debug: var=vars_files_var - debug: var=vars_files_var_role - debug: var=registered_var + - debug: var=from_inventory_once_removed - assert: that: - 'extra_var == "extra_var"' + - 'extra_var_override == "extra_var_override"' + - 'extra_var_override_once_removed == "extra_var_override"' - 'vars_var == "vars_var"' - 'vars_files_var == "vars_files_var"' - 'vars_files_var_role == "vars_files_var_role3"' - 'registered_var == "this is from set_fact"' + - 'from_inventory_once_removed == "inventory_var"' - hosts: inven_overridehosts vars_files: diff --git a/test/integration/test_winrm.yml b/test/integration/test_winrm.yml index c05a1308318..e2a282e061f 100644 --- a/test/integration/test_winrm.yml +++ b/test/integration/test_winrm.yml @@ -29,3 +29,7 @@ - { role: test_win_get_url, tags: test_win_get_url } - { role: test_win_msi, tags: test_win_msi } - { role: test_win_service, tags: test_win_service } + - { role: test_win_feature, tags: test_win_feature } + - { role: test_win_file, tags: test_win_file } + - { role: test_win_copy, tags: test_win_copy } + - { role: test_win_template, tags: test_win_template } diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml index 69c737a8a33..4e7fe635f45 100644 --- a/test/integration/unicode.yml +++ b/test/integration/unicode.yml @@ -35,9 +35,30 @@ host_id: '{{item}}' with_sequence: start=1 end={{num_hosts}} format=%d + - name: 'A task with unicode extra vars' + debug: var=extra_var + + - name: 'A play for hosts in group: ĪīĬĭ' hosts: 'ĪīĬĭ' gather_facts: true tasks: - debug: msg='Unicode is a good thing ™' - debug: msg=АБВГД + +# Run this test by adding to the CLI: -e start_at_task=True --start-at-task '*¶' +- name: 'Show that we can skip to unicode named tasks' + hosts: localhost + gather_facts: false + vars: + flag: 'original' + start_at_task: False + tasks: + - name: 'Override flag var' + set_fact: flag='new' + + - name: 'A unicode task at the end of the playbook: ¶' + assert: + that: + - 'flag == "original"' + when: start_at_task|bool diff --git a/test/integration/vars_file.yml b/test/integration/vars_file.yml index bd162327d27..c43bf818668 100644 --- a/test/integration/vars_file.yml +++ b/test/integration/vars_file.yml @@ -2,4 +2,11 @@ # in general define test data in the individual role: # roles/role_name/vars/main.yml +foo: "Hello" +things1: + - 1 + - 2 +things2: + - "{{ foo }}" + - "{{ foob }}" vars_file_var: 321 diff --git 
a/test/units/TestFilters.py b/test/units/TestFilters.py index 7d921a7e375..d15147b0982 100644 --- a/test/units/TestFilters.py +++ b/test/units/TestFilters.py @@ -131,6 +131,11 @@ class TestFilters(unittest.TestCase): 'a\\1') assert a == 'ansible' + def test_to_uuid(self): + a = ansible.runner.filter_plugins.core.to_uuid('example.com') + + assert a == 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe' + #def test_filters(self): # this test is pretty low level using a playbook, hence I am disabling it for now -- MPD. diff --git a/test/units/TestModuleUtilsBasic.py b/test/units/TestModuleUtilsBasic.py index 2ffb310b95e..2ac77764d74 100644 --- a/test/units/TestModuleUtilsBasic.py +++ b/test/units/TestModuleUtilsBasic.py @@ -7,7 +7,8 @@ from nose.tools import timed from ansible import errors from ansible.module_common import ModuleReplacer -from ansible.utils import md5 as utils_md5 +from ansible.module_utils.basic import heuristic_log_sanitize +from ansible.utils import checksum as utils_checksum TEST_MODULE_DATA = """ from ansible.module_utils.basic import * @@ -113,8 +114,8 @@ class TestModuleUtilsBasic(unittest.TestCase): (rc, out, err) = self.module.run_command('echo "foo bar" > %s' % tmp_path, use_unsafe_shell=True) self.assertEqual(rc, 0) self.assertTrue(os.path.exists(tmp_path)) - md5sum = utils_md5(tmp_path) - self.assertEqual(md5sum, '5ceaa7ed396ccb8e959c02753cb4bd18') + checksum = utils_checksum(tmp_path) + self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec') except: raise finally: @@ -127,8 +128,8 @@ class TestModuleUtilsBasic(unittest.TestCase): (rc, out, err) = self.module.run_command('echo "foo bar" >> %s' % tmp_path, use_unsafe_shell=True) self.assertEqual(rc, 0) self.assertTrue(os.path.exists(tmp_path)) - md5sum = utils_md5(tmp_path) - self.assertEqual(md5sum, '5ceaa7ed396ccb8e959c02753cb4bd18') + checksum = utils_checksum(tmp_path) + self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec') except: raise finally: @@ -264,23 +265,23 @@ class TestModuleUtilsBasicHelpers(unittest.TestCase): @timed(5) def test_log_sanitize_speed_many_url(self): - self.module._heuristic_log_sanitize(self.many_url) + heuristic_log_sanitize(self.many_url) @timed(5) def test_log_sanitize_speed_many_ssh(self): - self.module._heuristic_log_sanitize(self.many_ssh) + heuristic_log_sanitize(self.many_ssh) @timed(5) def test_log_sanitize_speed_one_url(self): - self.module._heuristic_log_sanitize(self.one_url) + heuristic_log_sanitize(self.one_url) @timed(5) def test_log_sanitize_speed_one_ssh(self): - self.module._heuristic_log_sanitize(self.one_ssh) + heuristic_log_sanitize(self.one_ssh) @timed(5) def test_log_sanitize_speed_zero_secrets(self): - self.module._heuristic_log_sanitize(self.zero_secrets) + heuristic_log_sanitize(self.zero_secrets) # # Test that the password obfuscation sanitizes somewhat cleanly. 
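 # (heuristic_log_sanitize is now imported directly from
 # ansible.module_utils.basic instead of being reached through the embedded
 # module instance, which is why the timed tests above call it as a plain
 # function.)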
@@ -290,16 +291,24 @@ class TestModuleUtilsBasicHelpers(unittest.TestCase): url_data = repr(self._gen_data(3, True, True, self.URL_SECRET)) ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET)) - url_output = self.module._heuristic_log_sanitize(url_data) - ssh_output = self.module._heuristic_log_sanitize(ssh_data) + url_output = heuristic_log_sanitize(url_data) + ssh_output = heuristic_log_sanitize(ssh_data) # Basic functionality: Successfully hid the password - self.assertNotIn('pas:word', url_output) - self.assertNotIn('pas:word', ssh_output) + try: + self.assertNotIn('pas:word', url_output) + self.assertNotIn('pas:word', ssh_output) - # Slightly more advanced, we hid all of the password despite the ":" - self.assertNotIn('pas', url_output) - self.assertNotIn('pas', ssh_output) + # Slightly more advanced, we hid all of the password despite the ":" + self.assertNotIn('pas', url_output) + self.assertNotIn('pas', ssh_output) + except AttributeError: + # python2.6 or less's unittest + self.assertFalse('pas:word' in url_output, '%s is present in %s' % ('"pas:word"', url_output)) + self.assertFalse('pas:word' in ssh_output, '%s is present in %s' % ('"pas:word"', ssh_output)) + + self.assertFalse('pas' in url_output, '%s is present in %s' % ('"pas"', url_output)) + self.assertFalse('pas' in ssh_output, '%s is present in %s' % ('"pas"', ssh_output)) # In this implementation we replace the password with 8 "*" which is # also the length of our password. The url fields should be able to @@ -313,9 +322,13 @@ class TestModuleUtilsBasicHelpers(unittest.TestCase): # the data, though: self.assertTrue(ssh_output.startswith("{'")) self.assertTrue(ssh_output.endswith("'}}}}")) - self.assertIn(":********@foo.com/data',", ssh_output) + try: + self.assertIn(":********@foo.com/data',", ssh_output) + except AttributeError: + # python2.6 or less's unittest + self.assertTrue(":********@foo.com/data'," in ssh_output, '%s is not present in %s' % (":********@foo.com/data',", ssh_output)) # The overzealous-ness here may lead to us changing the algorithm in # the future. We could make it consume less of the data (with the - # possiblity of leaving partial passwords exposed) and encourage + # possibility of leaving partial passwords exposed) and encourage # people to use no_log instead of relying on this obfuscation. 
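A note on the checksum hunks above: the expected values changed because utils.checksum now returns a SHA-1 digest of the file contents rather than an MD5 sum, MD5 being unavailable on FIPS-140 enabled hosts. A minimal sketch of such a helper using only the standard library (the name sha1_file_checksum is illustrative; this is not Ansible's actual utils.checksum):

    import hashlib

    def sha1_file_checksum(path, blocksize=65536):
        # Read in fixed-size chunks so large files need not fit in memory.
        digest = hashlib.sha1()
        with open(path, 'rb') as f:
            for block in iter(lambda: f.read(blocksize), b''):
                digest.update(block)
        return digest.hexdigest()

    # `echo "foo bar" > tmp_path` writes the eight bytes "foo bar\n", whose
    # SHA-1 is the d53a205a... value asserted in the tests above.
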
diff --git a/test/units/TestModuleUtilsDatabase.py b/test/units/TestModuleUtilsDatabase.py new file mode 100644 index 00000000000..5278d6db5aa --- /dev/null +++ b/test/units/TestModuleUtilsDatabase.py @@ -0,0 +1,103 @@ +import collections +import mock +import os + +from nose import tools + +from ansible.module_utils.database import ( + pg_quote_identifier, + SQLParseError, +) + + +# Note: Using nose's generator test cases here so we can't inherit from +# unittest.TestCase +class TestQuotePgIdentifier(object): + + # These are all valid strings + # The results are based on interpreting the identifier as a table name + valid = { + # User quoted + '"public.table"': '"public.table"', + '"public"."table"': '"public"."table"', + '"schema test"."table test"': '"schema test"."table test"', + + # We quote part + 'public.table': '"public"."table"', + '"public".table': '"public"."table"', + 'public."table"': '"public"."table"', + 'schema test.table test': '"schema test"."table test"', + '"schema test".table test': '"schema test"."table test"', + 'schema test."table test"': '"schema test"."table test"', + + # Embedded double quotes + 'table "test"': '"table ""test"""', + 'public."table ""test"""': '"public"."table ""test"""', + 'public.table "test"': '"public"."table ""test"""', + 'schema "test".table': '"schema ""test"""."table"', + '"schema ""test""".table': '"schema ""test"""."table"', + '"""wat"""."""test"""': '"""wat"""."""test"""', + # Sigh, handle these as well: + '"no end quote': '"""no end quote"', + 'schema."table': '"schema"."""table"', + '"schema.table': '"""schema"."table"', + 'schema."table.something': '"schema"."""table"."something"', + + # Embedded dots + '"schema.test"."table.test"': '"schema.test"."table.test"', + '"schema.".table': '"schema."."table"', + '"schema."."table"': '"schema."."table"', + 'schema.".table"': '"schema".".table"', + '"schema".".table"': '"schema".".table"', + '"schema.".".table"': '"schema.".".table"', + # These are valid but maybe not what the user intended + '."table"': '".""table"""', + 'table.': '"table."', + } + + invalid = { + ('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots', + ('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots', + ('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots', + ('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots", + ('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots", + ('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots", + ('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots", + ('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes', + ('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes', + ('"schema."table"','table'): 'User escaped identifiers must escape extra quotes', + ('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot', + } + + def check_valid_quotes(self, identifier, quoted_identifier): + tools.eq_(pg_quote_identifier(identifier, 'table'), quoted_identifier) + + def test_valid_quotes(self): + for identifier in self.valid: + yield self.check_valid_quotes, identifier, self.valid[identifier] + + def check_invalid_quotes(self, identifier, id_type, msg): + if 
hasattr(tools, 'assert_raises_regexp'): + tools.assert_raises_regexp(SQLParseError, msg, pg_quote_identifier, *(identifier, id_type)) + else: + tools.assert_raises(SQLParseError, pg_quote_identifier, *(identifier, id_type)) + + def test_invalid_quotes(self): + for test in self.invalid: + yield self.check_invalid_quotes, test[0], test[1], self.invalid[test] + + def test_how_many_dots(self): + tools.eq_(pg_quote_identifier('role', 'role'), '"role"') + tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support role with more than 1 dots", pg_quote_identifier, *('role.more', 'role')) + + tools.eq_(pg_quote_identifier('db', 'database'), '"db"') + tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support database with more than 1 dots", pg_quote_identifier, *('db.more', 'database')) + + tools.eq_(pg_quote_identifier('db.schema', 'schema'), '"db"."schema"') + tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support schema with more than 2 dots", pg_quote_identifier, *('db.schema.more', 'schema')) + + tools.eq_(pg_quote_identifier('db.schema.table', 'table'), '"db"."schema"."table"') + tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support table with more than 3 dots", pg_quote_identifier, *('db.schema.table.more', 'table')) + + tools.eq_(pg_quote_identifier('db.schema.table.column', 'column'), '"db"."schema"."table"."column"') + tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support column with more than 4 dots", pg_quote_identifier, *('db.schema.table.column.more', 'column')) diff --git a/test/units/TestModules.py b/test/units/TestModules.py index 83c2b7c3986..aef2e83ed62 100644 --- a/test/units/TestModules.py +++ b/test/units/TestModules.py @@ -16,7 +16,7 @@ class TestModules(unittest.TestCase): for (dirpath, dirnames, filenames) in os.walk(path): for filename in filenames: (path, ext) = os.path.splitext(filename) - if ext != ".ps1": + if ext == ".py": module_list.append(os.path.join(dirpath, filename)) return module_list diff --git a/test/units/TestPlayVarsFiles.py b/test/units/TestPlayVarsFiles.py index d1b1f9dfa22..f241936a12e 100644 --- a/test/units/TestPlayVarsFiles.py +++ b/test/units/TestPlayVarsFiles.py @@ -82,8 +82,8 @@ class TestMe(unittest.TestCase): os.remove(temp_path) # make sure the variable was loaded - assert 'foo' in play.vars, "vars_file was not loaded into play.vars" - assert play.vars['foo'] == 'bar', "foo was not set to bar in play.vars" + assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars" + assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars" def test_vars_file_nonlist_error(self): @@ -133,10 +133,10 @@ class TestMe(unittest.TestCase): os.remove(temp_path2) # make sure the variables were loaded - assert 'foo' in play.vars, "vars_file was not loaded into play.vars" - assert play.vars['foo'] == 'bar', "foo was not set to bar in play.vars" - assert 'baz' in play.vars, "vars_file2 was not loaded into play.vars" - assert play.vars['baz'] == 'bang', "baz was not set to bang in play.vars" + assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars" + assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars" + assert 'baz' in play.vars_file_vars, "vars_file2 was not loaded into play.vars_file_vars" + assert play.vars_file_vars['baz'] == 'bang', "baz was not set to bang in play.vars_file_vars" def test_vars_files_first_found(self): @@ -160,8 +160,8 @@ class 
TestMe(unittest.TestCase): os.remove(temp_path) # make sure the variable was loaded - assert 'foo' in play.vars, "vars_file was not loaded into play.vars" - assert play.vars['foo'] == 'bar', "foo was not set to bar in play.vars" + assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars" + assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars" def test_vars_files_multiple_found(self): @@ -187,9 +187,9 @@ class TestMe(unittest.TestCase): os.remove(temp_path2) # make sure the variables were loaded - assert 'foo' in play.vars, "vars_file was not loaded into play.vars" - assert play.vars['foo'] == 'bar', "foo was not set to bar in play.vars" - assert 'baz' not in play.vars, "vars_file2 was loaded after vars_file1 was loaded" + assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars" + assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars" + assert 'baz' not in play.vars_file_vars, "vars_file2 was loaded after vars_file1 was loaded" def test_vars_files_assert_all_found(self): @@ -227,7 +227,7 @@ class TestMe(unittest.TestCase): # VARIABLE PRECEDENCE TESTS ######################################## - # On the first run vars_files are loaded into play.vars by host == None + # On the first run vars_files are loaded into play.vars_file_vars by host == None # * only files with vars from host==None will work here # On the secondary run(s), a host is given and the vars_files are loaded into VARS_CACHE # * this only occurs if host is not None, filename2 has vars in the name, and filename3 does not @@ -273,8 +273,8 @@ class TestMe(unittest.TestCase): def test_vars_files_two_vars_in_name(self): - # self.vars = ds['vars'] - # self.vars += _get_vars() ... aka extra_vars + # self.vars_file_vars = ds['vars'] + # self.vars_file_vars += _get_vars() ... aka extra_vars # make a temp dir temp_dir = mkdtemp() @@ -299,7 +299,7 @@ class TestMe(unittest.TestCase): # cleanup shutil.rmtree(temp_dir) - assert 'foo' in play.vars, "double var templated vars_files filename not loaded" + assert 'foo' in play.vars_file_vars, "double var templated vars_files filename not loaded" def test_vars_files_two_vars_different_scope(self): @@ -337,7 +337,7 @@ class TestMe(unittest.TestCase): # cleanup shutil.rmtree(temp_dir) - assert 'foo' not in play.vars, \ + assert 'foo' not in play.vars_file_vars, \ "mixed scope vars_file loaded into play vars" assert 'foo' in play.playbook.VARS_CACHE['localhost'], \ "differently scoped templated vars_files filename not loaded" @@ -376,7 +376,7 @@ class TestMe(unittest.TestCase): # cleanup shutil.rmtree(temp_dir) - assert 'foo' not in play.vars, \ + assert 'foo' not in play.vars_file_vars, \ "mixed scope vars_file loaded into play vars" assert 'foo' in play.playbook.VARS_CACHE['localhost'], \ "differently scoped templated vars_files filename not loaded" diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index 73ef9796743..478cfebfd1e 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -28,9 +28,18 @@ sys.setdefaultencoding("utf8") class TestUtils(unittest.TestCase): + def _is_fips(self): + try: + data = open('/proc/sys/crypto/fips_enabled').read().strip() + except: + return False + if data != '1': + return False + return True + def test_before_comment(self): ''' see if we can detect the part of a string before a comment. 
Used by INI parser in inventory ''' - + input = "before # comment" expected = "before " actual = ansible.utils.before_comment(input) @@ -357,15 +366,29 @@ class TestUtils(unittest.TestCase): dict(foo=dict(bar='qux'))) def test_md5s(self): + if self._is_fips(): + raise SkipTest('MD5 unavailable on FIPS enabled systems') self.assertEqual(ansible.utils.md5s('ansible'), '640c8a5376aa12fa15cf02130ce239a6') # Need a test that causes UnicodeEncodeError See 4221 def test_md5(self): + if self._is_fips(): + raise SkipTest('MD5 unavailable on FIPS enabled systems') self.assertEqual(ansible.utils.md5(os.path.join(os.path.dirname(__file__), 'ansible.cfg')), 'fb7b5b90ea63f04bde33e804b6fad42c') self.assertEqual(ansible.utils.md5(os.path.join(os.path.dirname(__file__), 'ansible.cf')), None) + def test_checksum_s(self): + self.assertEqual(ansible.utils.checksum_s('ansible'), 'bef45157a43c9e5f469d188810814a4a8ab9f2ed') + # Need a test that causes UnicodeEncodeError See 4221 + + def test_checksum(self): + self.assertEqual(ansible.utils.checksum(os.path.join(os.path.dirname(__file__), 'ansible.cfg')), + '658b67c8ac7595adde7048425ff1f9aba270721a') + self.assertEqual(ansible.utils.checksum(os.path.join(os.path.dirname(__file__), 'ansible.cf')), + None) + def test_default(self): self.assertEqual(ansible.utils.default(None, lambda: {}), {}) self.assertEqual(ansible.utils.default(dict(foo='bar'), lambda: {}), dict(foo='bar')) @@ -433,10 +456,6 @@ class TestUtils(unittest.TestCase): hash = ansible.utils.do_encrypt('ansible', 'sha256_crypt') self.assertTrue(passlib.hash.sha256_crypt.verify('ansible', hash)) - hash = ansible.utils.do_encrypt('ansible', 'md5_crypt', salt_size=4) - self.assertTrue(passlib.hash.md5_crypt.verify('ansible', hash)) - - try: ansible.utils.do_encrypt('ansible', 'ansible') except ansible.errors.AnsibleError: @@ -444,6 +463,12 @@ class TestUtils(unittest.TestCase): else: raise AssertionError('Incorrect exception, expected AnsibleError') + def test_do_encrypt_md5(self): + if self._is_fips(): + raise SkipTest('MD5 unavailable on FIPS systems') + hash = ansible.utils.do_encrypt('ansible', 'md5_crypt', salt_size=4) + self.assertTrue(passlib.hash.md5_crypt.verify('ansible', hash)) + def test_last_non_blank_line(self): self.assertEqual(ansible.utils.last_non_blank_line('a\n\nb\n\nc'), 'c') self.assertEqual(ansible.utils.last_non_blank_line(''), '') @@ -471,7 +496,7 @@ class TestUtils(unittest.TestCase): self.assertEqual(ansible.utils.boolean("foo"), False) def test_make_sudo_cmd(self): - cmd = ansible.utils.make_sudo_cmd('root', '/bin/sh', '/bin/ls') + cmd = ansible.utils.make_sudo_cmd(C.DEFAULT_SUDO_EXE, 'root', '/bin/sh', '/bin/ls') self.assertTrue(isinstance(cmd, tuple)) self.assertEqual(len(cmd), 3) self.assertTrue('-u root' in cmd[0]) @@ -541,10 +566,9 @@ class TestUtils(unittest.TestCase): def test_listify_lookup_plugin_terms(self): basedir = os.path.dirname(__file__) - self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict()), - ['things']) - self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['one', 'two'])), - ['one', 'two']) + # Straight lookups + #self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=[])), []) + #self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['one', 'two'])), ['one', 'two']) def test_deprecated(self): sys_stderr = sys.stderr @@ -687,19 +711,13 @@ class TestUtils(unittest.TestCase): ) # invalid quote detection - try: - with 
self.assertRaises(Exception): - split_args('hey I started a quote"') - with self.assertRaises(Exception): - split_args('hey I started a\' quote') - except TypeError: - # you must be on Python 2.6 still, FIXME - pass + self.assertRaises(Exception, split_args, 'hey I started a quote"') + self.assertRaises(Exception, split_args, 'hey I started a\' quote') # jinja2 loop blocks with lots of complexity _test_combo( # in memory of neighbors cat - # we preserve line breaks unless a line continuation character preceeds them + # we preserve line breaks unless a line continuation character precedes them 'a {% if x %} y {%else %} {{meow}} {% endif %} "cookie\nchip" \\\ndone\nand done', ['a', '{% if x %}', 'y', '{%else %}', '{{meow}}', '{% endif %}', '"cookie\nchip"', 'done\n', 'and', 'done'] ) @@ -838,3 +856,67 @@ class TestUtils(unittest.TestCase): for (spec, result) in tests: self.assertEqual(ansible.utils.role_spec_parse(spec), result) + def test_role_yaml_parse(self): + tests = ( + ( + # Old style + { + 'role': 'debops.elasticsearch', + 'name': 'elks' + }, + { + 'role': 'debops.elasticsearch', + 'name': 'elks', + 'scm': None, + 'src': 'debops.elasticsearch', + 'version': '', + } + ), + ( + { + 'role': 'debops.elasticsearch,1.0,elks', + 'my_param': 'foo' + }, + { + 'role': 'debops.elasticsearch,1.0,elks', + 'name': 'elks', + 'scm': None, + 'src': 'debops.elasticsearch', + 'version': '1.0', + 'my_param': 'foo', + } + ), + ( + { + 'role': 'debops.elasticsearch,1.0', + 'my_param': 'foo' + }, + { + 'role': 'debops.elasticsearch,1.0', + 'name': 'debops.elasticsearch', + 'scm': None, + 'src': 'debops.elasticsearch', + 'version': '1.0', + 'my_param': 'foo', + } + ), + # New style + ( + { + 'src': 'debops.elasticsearch', + 'name': 'elks', + 'my_param': 'foo' + }, + { + 'name': 'elks', + 'scm': None, + 'src': 'debops.elasticsearch', + 'version': '', + 'my_param': 'foo' + } + ), + ) + + for (role, result) in tests: + self.assertEqual(ansible.utils.role_yaml_parse(role), result) + diff --git a/test/units/TestVault.py b/test/units/TestVault.py index 415d5c14aa8..b720d72e849 100644 --- a/test/units/TestVault.py +++ b/test/units/TestVault.py @@ -36,6 +36,15 @@ except ImportError: class TestVaultLib(TestCase): + def _is_fips(self): + try: + data = open('/proc/sys/crypto/fips_enabled').read().strip() + except: + return False + if data != '1': + return False + return True + def test_methods_exist(self): v = VaultLib('ansible') slots = ['is_encrypted', @@ -77,6 +86,8 @@ class TestVaultLib(TestCase): assert v.version == "9.9" def test_encrypt_decrypt_aes(self): + if self._is_fips(): + raise SkipTest('MD5 not available on FIPS enabled systems') if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest v = VaultLib('ansible') @@ -84,7 +95,7 @@ class TestVaultLib(TestCase): enc_data = v.encrypt("foobar") dec_data = v.decrypt(enc_data) assert enc_data != "foobar", "encryption failed" - assert dec_data == "foobar", "decryption failed" + assert dec_data == "foobar", "decryption failed" def test_encrypt_decrypt_aes256(self): if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: diff --git a/test/units/TestVaultEditor.py b/test/units/TestVaultEditor.py index cf7515370ab..cfa5bc13e63 100644 --- a/test/units/TestVaultEditor.py +++ b/test/units/TestVaultEditor.py @@ -37,6 +37,15 @@ except ImportError: class TestVaultEditor(TestCase): + def _is_fips(self): + try: + data = open('/proc/sys/crypto/fips_enabled').read().strip() + except: + return False + if data != '1': + return False + return True + def 
test_methods_exist(self): v = VaultEditor(None, None, None) slots = ['create_file', @@ -51,6 +60,8 @@ class TestVaultEditor(TestCase): assert hasattr(v, slot), "VaultLib is missing the %s method" % slot def test_decrypt_1_0(self): + if self._is_fips(): + raise SkipTest('Vault-1.0 will not function on FIPS enabled systems') if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest dirpath = tempfile.mkdtemp() @@ -75,18 +86,18 @@ class TestVaultEditor(TestCase): assert error_hit == False, "error decrypting 1.0 file" assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() - def test_decrypt_1_0_newline(self): + def test_decrypt_1_1_newline(self): if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest dirpath = tempfile.mkdtemp() - filename = os.path.join(dirpath, "foo-ansible-1.0-ansible-newline-ansible.yml") + filename = os.path.join(dirpath, "foo-ansible-1.1-ansible-newline-ansible.yml") shutil.rmtree(dirpath) shutil.copytree("vault_test_data", dirpath) ve = VaultEditor(None, "ansible\nansible\n", filename) # make sure the password functions for the cipher error_hit = False - try: + try: ve.decrypt_file() except errors.AnsibleError, e: error_hit = True @@ -97,8 +108,8 @@ class TestVaultEditor(TestCase): f.close() shutil.rmtree(dirpath) - assert error_hit == False, "error decrypting 1.0 file with newline in password" - #assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() + assert error_hit == False, "error decrypting 1.1 file with newline in password" + #assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip() def test_decrypt_1_1(self): @@ -112,7 +123,7 @@ class TestVaultEditor(TestCase): # make sure the password functions for the cipher error_hit = False - try: + try: ve.decrypt_file() except errors.AnsibleError, e: error_hit = True @@ -123,11 +134,13 @@ class TestVaultEditor(TestCase): f.close() shutil.rmtree(dirpath) - assert error_hit == False, "error decrypting 1.0 file" - assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() + assert error_hit == False, "error decrypting 1.1 file" + assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip() def test_rekey_migration(self): + if self._is_fips(): + raise SkipTest('Vault-1.0 will not function on FIPS enabled systems') if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest dirpath = tempfile.mkdtemp() diff --git a/test/units/module_tests/TestApt.py b/test/units/module_tests/TestApt.py new file mode 100644 index 00000000000..e7f2dafc95d --- /dev/null +++ b/test/units/module_tests/TestApt.py @@ -0,0 +1,42 @@ +import collections +import mock +import os +import unittest + +from ansible.modules.core.packaging.os.apt import ( + expand_pkgspec_from_fnmatches, +) + + +class AptExpandPkgspecTestCase(unittest.TestCase): + + def setUp(self): + FakePackage = collections.namedtuple("Package", ("name",)) + self.fake_cache = [ FakePackage("apt"), + FakePackage("apt-utils"), + FakePackage("not-selected"), + ] + + def test_trivial(self): + foo = ["apt"] + self.assertEqual( + expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo) + + def test_version_wildcard(self): + foo = ["apt=1.0*"] + self.assertEqual( + expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo) + + def test_pkgname_wildcard_version_wildcard(self): + foo = ["apt*=1.0*"] + m_mock = mock.Mock() + self.assertEqual( + expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache), + ['apt', 
'apt-utils']) + + def test_pkgname_expands(self): + foo = ["apt*"] + m_mock = mock.Mock() + self.assertEqual( + expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache), + ["apt", "apt-utils"]) diff --git a/test/units/module_tests/TestDocker.py b/test/units/module_tests/TestDocker.py new file mode 100644 index 00000000000..b8c8cf1e235 --- /dev/null +++ b/test/units/module_tests/TestDocker.py @@ -0,0 +1,19 @@ +import collections +import os +import unittest + +from ansible.modules.core.cloud.docker.docker import get_split_image_tag + +class DockerSplitImageTagTestCase(unittest.TestCase): + + def test_trivial(self): + self.assertEqual(get_split_image_tag('test'), ('test', 'latest')) + + def test_with_org_name(self): + self.assertEqual(get_split_image_tag('ansible/centos7-ansible'), ('ansible/centos7-ansible', 'latest')) + + def test_with_tag(self): + self.assertEqual(get_split_image_tag('test:devel'), ('test', 'devel')) + + def test_with_tag_and_org_name(self): + self.assertEqual(get_split_image_tag('ansible/centos7-ansible:devel'), ('ansible/centos7-ansible', 'devel')) diff --git a/test/units/vault_test_data/foo-ansible-1.0-ansible-newline-ansible.yml b/test/units/vault_test_data/foo-ansible-1.0-ansible-newline-ansible.yml deleted file mode 100644 index dd4e6e746b0..00000000000 --- a/test/units/vault_test_data/foo-ansible-1.0-ansible-newline-ansible.yml +++ /dev/null @@ -1,4 +0,0 @@ -$ANSIBLE_VAULT;1.0;AES -53616c7465645f5ff0442ae8b08e2ff316d0d6512013185df7aded44f3c0eeef1b7544d078be1fe7 -ed88d0fedcb11928df45558f4b7f80fce627fbb08c5288885ab053f4129175779a8f24f5c1113731 -7d22cee14284670953c140612edf62f92485123fc4f15099ffe776e906e08145 diff --git a/test/units/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml b/test/units/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml new file mode 100644 index 00000000000..6e025a1c40a --- /dev/null +++ b/test/units/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +61333063333663376535373431643063613232393438623732643966613962363563383132363631 +3235363730623635323039623439343561313566313361630a313632643338613636303637623765 +64356531643630303636323064336439393335313836366235336464633635376339663830333232 +6338353337663139320a646632386131646431656165656338633535386535623236393265373634 +37656134633661333935346434363237613435323865356234323264663838643931 diff --git a/ticket_stubs/_module_issue_move.md b/ticket_stubs/_module_issue_move.md new file mode 100644 index 00000000000..173b3932ed8 --- /dev/null +++ b/ticket_stubs/_module_issue_move.md @@ -0,0 +1,35 @@ +Module Repo Information +======================= + +Hi! + +Thanks very much for your interest in Ansible. It sincerely means a lot to us. + +On September 26, 2014, due to enormous levels of contribution to the project, Ansible decided to reorganize module repos, making it easier +for developers to work on the project and for us to more easily manage new contributions and tickets. + +We split modules from the main project off into two repos, http://github.com/ansible/ansible-modules-core and http://github.com/ansible/ansible-modules-extras + +If you would still like this ticket attended to, we will need your help in having it reopened in one of the two new repos, and instructions are provided below. + +We apologize that we are not able to make this transition happen seamlessly, though this is a one-time change and your help is greatly appreciated -- +this will greatly improve velocity going forward. 
+ +Both sets of modules will ship with Ansible, though they'll receive slightly different ticket handling. + +To locate where a module lives between 'core' and 'extras': + + * Find the module at http://docs.ansible.com/list_of_all_modules.html + * Open the documentation page for that module + * If the bottom of the docs says "This is an extras module", submit your ticket to https://github.com/ansible/ansible-modules-extras + * Otherwise, submit your ticket about the existing module to https://github.com/ansible/ansible-modules-core + * action_plugins (modules with server side components) still live in the main repo. If your ticket affects both, open the ticket + on the module repo just the same. + +Additionally, should you need more help with this, you can ask questions on: + + * the ansible-project mailing list: https://groups.google.com/forum/#!forum/ansible-project + +Thank you very much! + + diff --git a/ticket_stubs/_module_pr_move.md b/ticket_stubs/_module_pr_move.md new file mode 100644 index 00000000000..59eb6cce7b6 --- /dev/null +++ b/ticket_stubs/_module_pr_move.md @@ -0,0 +1,44 @@ +Module Repo Information +======================= + +Hi! + +Thanks very much for your interest in Ansible. It sincerely means a lot to us. + +On September 26, 2014, due to enormous levels of contribution to the project, Ansible decided to reorganize module repos, making it easier +for developers to work on the project and for us to more easily manage new contributions and tickets. + +We split modules from the main project off into two repos, http://github.com/ansible/ansible-modules-core and http://github.com/ansible/ansible-modules-extras + +If you would still like this pull request merged, we will need your help making this target the new repo. If you do not take any action, this +pull request unfortunately cannot be applied. + +We apologize that we are not able to make this transition happen seamlessly, though this is a one-time change and your help is greatly appreciated -- +this will greatly improve velocity going forward. + +Both sets of modules will ship with Ansible, though they'll receive slightly different ticket handling. + +To locate where a module lives between 'core' and 'extras': + + * Find the module at http://docs.ansible.com/list_of_all_modules.html + * Open the documentation page for that module + * If the bottom of the docs says "This is an extras module", submit your pull request to https://github.com/ansible/ansible-modules-extras + * Otherwise, submit your pull request to update the existing module to https://github.com/ansible/ansible-modules-core + * Note that python modules in ansible now also end in ".py" and this extension is required for new contributions. + * action_plugins (modules with server side components) still live in the main repo. If your pull request touches both, which should be + exceedingly rare, submit two new pull requests and make sure to mention the links to each other in the comments. + +Otherwise, if this is a new module: + + * Submit your pull request to add a module to https://github.com/ansible/ansible-modules-extras + +It may be possible to repatriate your pull requests automatically; one user-submitted approach for advanced git users +has been suggested at https://gist.github.com/willthames/afbaaab0c9681ed45619 + +Additionally, should you need more help with this, you can ask questions on: + + * the development mailing list: https://groups.google.com/forum/#!forum/ansible-devel + +Thank you very much! 
+ + diff --git a/ticket_stubs/bug_confirmed_p1_or_p2.md b/ticket_stubs/bug_confirmed_p1_or_p2.md index deb287b84d2..463a8675865 100644 --- a/ticket_stubs/bug_confirmed_p1_or_p2.md +++ b/ticket_stubs/bug_confirmed_p1_or_p2.md @@ -12,7 +12,7 @@ Additionally: * INSERT REASONS! We will definitely see your comments on this issue when reading this ticket, but may not be able to reply promptly. You may also wish to join one of our two mailing lists -which is very active: +which are very active: * https://groups.google.com/forum/#!forum/ansible-project - for user questions, tips, and tricks * https://groups.google.com/forum/#!forum/ansible-devel - for strategy, future planning, and questions about writing code diff --git a/ticket_stubs/great_idea.md b/ticket_stubs/great_idea.md index d9e36173aeb..b6f35fc10cb 100644 --- a/ticket_stubs/great_idea.md +++ b/ticket_stubs/great_idea.md @@ -1,4 +1,4 @@ -Submission Recieved +Submission Received =================== Hi! @@ -13,10 +13,10 @@ It might take us a little while to get to this one. Just as a quick reminder of we assign things a priority between P1 (highest) and P5. We may also ask some questions and it may be a while before we can get to this, but we'd like to thank you very much for your time! -We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgot about you! +We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgotten about you! We will definitely see your comments on this issue when reading this ticket, but may not be able to reply promptly. You may also wish to join one of our two mailing lists -which is very active: +which are very active: * https://groups.google.com/forum/#!forum/ansible-project - for user questions, tips, and tricks * https://groups.google.com/forum/#!forum/ansible-devel - for strategy, future planning, and questions about writing code diff --git a/ticket_stubs/module_repo.md b/ticket_stubs/module_repo.md new file mode 100644 index 00000000000..13791eaaa2e --- /dev/null +++ b/ticket_stubs/module_repo.md @@ -0,0 +1,29 @@ +Module Repo Information +======================= + +Hi! + +Thanks very much for your interest in Ansible. It sincerely means a lot to us. + +This appears to be a submission about a module, and aside from action_plugins, if you know what those are, the modules +in ansible have now moved to two separate repos. We would appreciate it if you could submit this there instead. + +If this is about a new module, submit pull requests or ideas to: + + * https://github.com/ansible/ansible-modules-extras + +If this is about an existing module: + + * Find the module at http://docs.ansible.com/list_of_all_modules.html + * Open the documentation page for that module + * If the bottom of the docs says "This is an extras module", submit your ticket to https://github.com/ansible/ansible-modules-extras + * Otherwise, submit it to https://github.com/ansible/ansible-modules-core + +Additionally, should you need more help with this, you can ask questions on: + + * IRC: #ansible on irc.freenode.net + * mailing list: https://groups.google.com/forum/#!forum/ansible-project + +Thanks! + + diff --git a/ticket_stubs/needs_info.md b/ticket_stubs/needs_info.md index 4e0353a26d6..24ec4017acc 100644 --- a/ticket_stubs/needs_info.md +++ b/ticket_stubs/needs_info.md @@ -11,10 +11,10 @@ We have some questions we'd like to know about before we can get this request qu Just as a quick reminder of things, this is a really busy project. 
We have over 800 contributors and to manage the queue effectively we assign things a priority between P1 (highest) and P5. We'd like to thank you very much for your time! -We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgot about you! +We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgotten about you! We will definitely see your comments on this issue when reading this ticket, but may not be able to reply promptly. You may also wish to join one of our two mailing lists -which is very active: +which are very active: * https://groups.google.com/forum/#!forum/ansible-project - for user questions, tips, and tricks * https://groups.google.com/forum/#!forum/ansible-devel - for strategy, future planning, and questions about writing code diff --git a/ticket_stubs/no_thanks.md b/ticket_stubs/no_thanks.md index 7e43f266951..e9249ba0333 100644 --- a/ticket_stubs/no_thanks.md +++ b/ticket_stubs/no_thanks.md @@ -14,7 +14,7 @@ open dialog. You can stop by the development list, and we'd be glad to talk abo * https://groups.google.com/forum/#!forum/ansible-devel -In the future, sometimes starting a discussion on the development list prior to implenting a feature can make getting things included a little easier, but it's not always neccessary. +In the future, sometimes starting a discussion on the development list prior to implementing a feature can make getting things included a little easier, but it's not always necessary. Thank you once again for this and your interest in Ansible! diff --git a/ticket_stubs/pr_cleanup_commits.md b/ticket_stubs/pr_cleanup_commits.md index b55b70177d6..a06cb6973e9 100644 --- a/ticket_stubs/pr_cleanup_commits.md +++ b/ticket_stubs/pr_cleanup_commits.md @@ -14,10 +14,10 @@ To resolve this problem, it may be helpful to create a new branch and cherry-pic Just as a quick reminder of things, this is a really busy project. We have over 800 contributors and to manage the queue effectively we assign things a priority between P1 (highest) and P5. We'd like to thank you very much for your time! -We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgot about you! +We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgotten about you! We will definitely see your comments on this issue when reading this ticket, but may not be able to reply promptly. You may also wish to join one of our two mailing lists -which is very active: +which are very active: * https://groups.google.com/forum/#!forum/ansible-project - for user questions, tips, and tricks * https://groups.google.com/forum/#!forum/ansible-devel - for strategy, future planning, and questions about writing code diff --git a/ticket_stubs/pr_duplicate.md b/ticket_stubs/pr_duplicate.md index 7294e94ef65..a2c3b48ea29 100644 --- a/ticket_stubs/pr_duplicate.md +++ b/ticket_stubs/pr_duplicate.md @@ -15,7 +15,7 @@ However, we're absolutely always up for discussion. Since this is a really busy * https://groups.google.com/forum/#!forum/ansible-devel -In the future, sometimes starting a discussion on the development list prior to implenting a feature can make getting things included a little easier, but it's not always neccessary. +In the future, sometimes starting a discussion on the development list prior to implementing a feature can make getting things included a little easier, but it's not always necessary. 
Thank you once again for this and your interest in Ansible! diff --git a/ticket_stubs/pr_needs_rebase.md b/ticket_stubs/pr_needs_rebase.md index 679cff7906e..f90af9940cb 100644 --- a/ticket_stubs/pr_needs_rebase.md +++ b/ticket_stubs/pr_needs_rebase.md @@ -13,10 +13,10 @@ It looks like the code underneath has changed since this was submitted. Can you Just as a quick reminder of things, this is a really busy project. We have over 800 contributors and to manage the queue effectively we assign things a priority between P1 (highest) and P5. We'd like to thank you very much for your time! -We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgot about you! +We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgotten about you! We will definitely see your comments on this issue when reading this ticket, but may not be able to reply promptly. You may also wish to join one of our two mailing lists -which is very active: +which are very active: * https://groups.google.com/forum/#!forum/ansible-project - for user questions, tips, and tricks * https://groups.google.com/forum/#!forum/ansible-devel - for strategy, future planning, and questions about writing code diff --git a/ticket_stubs/pr_needs_revision.md b/ticket_stubs/pr_needs_revision.md index 36e41184a2a..64590cd7d87 100644 --- a/ticket_stubs/pr_needs_revision.md +++ b/ticket_stubs/pr_needs_revision.md @@ -11,10 +11,10 @@ We'd like to see a few things tweaked if you don't mind. If you can help resolv Just as a quick reminder of things, this is a really busy project. We have over 800 contributors and to manage the queue effectively we assign things a priority between P1 (highest) and P5. We'd like to thank you very much for your time! -We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgot about you! +We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgotten about you! We will definitely see your comments on this issue when reading this ticket, but may not be able to reply promptly. You may also wish to join one of our two mailing lists -which is very active: +which are very active: * https://groups.google.com/forum/#!forum/ansible-project - for user questions, tips, and tricks * https://groups.google.com/forum/#!forum/ansible-devel - for strategy, future planning, and questions about writing code diff --git a/ticket_stubs/thanks.md b/ticket_stubs/thanks.md index f86fdd9afa5..646571d568b 100644 --- a/ticket_stubs/thanks.md +++ b/ticket_stubs/thanks.md @@ -1,4 +1,4 @@ -Submission Recieved +Submission Received =================== Hi! @@ -9,10 +9,10 @@ Just as a quick reminder of things, this is a really busy project. We have over we assign things a priority between P1 (highest) and P5. We may also ask some questions and it may be a while before we can get to this, but we'd like to thank you very much for your time! -We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgot about you! +We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgotten about you! We will definitely see your comments on this issue when reading this ticket, but may not be able to reply promptly. 
You may also wish to join one of our two mailing lists -which is very active: +which are very active: * https://groups.google.com/forum/#!forum/ansible-project - for user questions, tips, and tricks * https://groups.google.com/forum/#!forum/ansible-devel - for strategy, future planning, and questions about writing code diff --git a/v2/README-tests.md b/v2/README-tests.md new file mode 100644 index 00000000000..956160b653a --- /dev/null +++ b/v2/README-tests.md @@ -0,0 +1,33 @@ +Ansible Test System +=================== + +Folders +======= + +test +---- + +Unit tests that exercise small pieces of code not suited for the integration test +layer. They are usually very API based and should leverage mock interfaces rather +than producing side effects. + +Playbook engine code is better suited for integration tests. + +Requirements: sudo pip install paramiko PyYAML jinja2 httplib2 passlib unittest2 mock + +integration +----------- + +Integration test layer, constructed using playbooks. + +Some tests may require cloud credentials, others will not, and destructive +tests are separated from non-destructive so a subset can be run on development +machines. + +learn more +---------- + +Hop into a subdirectory and see the associated README.md for more info. + + + diff --git a/v2/ansible/__init__.py b/v2/ansible/__init__.py new file mode 100644 index 00000000000..26869775ead --- /dev/null +++ b/v2/ansible/__init__.py @@ -0,0 +1,22 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +__version__ = '1.v2' diff --git a/v2/ansible/compat/__init__.py b/v2/ansible/compat/__init__.py new file mode 100644 index 00000000000..e77b77d2a6f --- /dev/null +++ b/v2/ansible/compat/__init__.py @@ -0,0 +1,27 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat library for ansible. This contains compatibility definitions for older python. +When we need to import a module differently depending on python version, do it +here. Then in the code we can simply import from compat in order to get what we want. 
+''' + diff --git a/v2/ansible/compat/configparser.py b/v2/ansible/compat/configparser.py new file mode 100644 index 00000000000..7cce6423763 --- /dev/null +++ b/v2/ansible/compat/configparser.py @@ -0,0 +1,30 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python3.x's configparser +''' + +# Python 2.7 +try: + from configparser import * +except ImportError: + from ConfigParser import * diff --git a/v2/ansible/compat/tests/__init__.py b/v2/ansible/compat/tests/__init__.py new file mode 100644 index 00000000000..fc05b2549b2 --- /dev/null +++ b/v2/ansible/compat/tests/__init__.py @@ -0,0 +1,40 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +This module contains things that are only needed for compat in the testsuites, +not in ansible itself. If you are not installing the test suite, you can +safely remove this subdirectory. +''' + +# +# Compat for python2.7 +# + +# One unittest needs to import builtins via __import__() so we need to have +# the string that represents it +try: + import __builtin__ +except ImportError: + BUILTINS = 'builtins' +else: + BUILTINS = '__builtin__' + diff --git a/v2/ansible/compat/tests/mock.py b/v2/ansible/compat/tests/mock.py new file mode 100644 index 00000000000..0614391c4b1 --- /dev/null +++ b/v2/ansible/compat/tests/mock.py @@ -0,0 +1,38 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python3.x's unittest.mock module +''' + +# Python 2.7 + +# Note: Could use the pypi mock library on python3.x as well as python2.x. It +# is the same as the python3 stdlib mock library + +try: + from unittest.mock import * +except ImportError: + # Python 2 + try: + from mock import * + except ImportError: + print('You need the mock library installed on python2.x to run tests') diff --git a/v2/ansible/compat/tests/unittest.py b/v2/ansible/compat/tests/unittest.py new file mode 100644 index 00000000000..a629849b315 --- /dev/null +++ b/v2/ansible/compat/tests/unittest.py @@ -0,0 +1,36 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python2.7's unittest module +''' + +import sys + +# Python 2.6 +if sys.version_info < (2, 7): + try: + # Need unittest2 on python2.6 + from unittest2 import * + except ImportError: + print('You need unittest2 installed on python2.6.x to run tests') +else: + from unittest import * diff --git a/v2/ansible/config/__init__.py b/v2/ansible/config/__init__.py new file mode 100644 index 00000000000..ae8ccff5952 --- /dev/null +++ b/v2/ansible/config/__init__.py @@ -0,0 +1,20 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py new file mode 100644 index 00000000000..1c2bc092b23 --- /dev/null +++ b/v2/ansible/constants.py @@ -0,0 +1,197 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import pwd +import sys + +from . compat import configparser + +from string import ascii_letters, digits + +# copied from utils, avoid circular reference fun :) +def mk_boolean(value): + if value is None: + return False + val = str(value) + if val.lower() in [ "true", "t", "y", "1", "yes" ]: + return True + else: + return False + +def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False): + ''' return a configuration variable with casting ''' + value = _get_config(p, section, key, env_var, default) + if boolean: + return mk_boolean(value) + if value and integer: + return int(value) + if value and floating: + return float(value) + if value and islist: + return [x.strip() for x in value.split(',')] + return value + +def _get_config(p, section, key, env_var, default): + ''' helper function for get_config ''' + if env_var is not None: + value = os.environ.get(env_var, None) + if value is not None: + return value + if p is not None: + try: + return p.get(section, key, raw=True) + except: + return default + return default + +def load_config_file(): + ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible ''' + + p = configparser.ConfigParser() + + path0 = os.getenv("ANSIBLE_CONFIG", None) + if path0 is not None: + path0 = os.path.expanduser(path0) + path1 = os.getcwd() + "/ansible.cfg" + path2 = os.path.expanduser("~/.ansible.cfg") + path3 = "/etc/ansible/ansible.cfg" + + for path in [path0, path1, path2, path3]: + if path is not None and os.path.exists(path): + try: + p.read(path) + except configparser.Error as e: + print("Error reading config file: \n{0}".format(e)) + sys.exit(1) + return p + return None + +def shell_expand_path(path): + ''' shell_expand_path is needed as os.path.expanduser does not work + when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE ''' + if path: + path = os.path.expanduser(os.path.expandvars(path)) + return path + +p = load_config_file() + +active_user = pwd.getpwuid(os.geteuid())[0] + +# check all of these extensions when looking for yaml files for things like +# group variables -- really anything we can load +YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ] + +# sections in config file +DEFAULTS='defaults' + +# configurable things +DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True) +DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', get_config(p, DEFAULTS,'hostfile','ANSIBLE_HOSTS', '/etc/ansible/hosts'))) +DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None) +DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles')) +DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp') +DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command') +DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*') +DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True) +DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '') +DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 
'ANSIBLE_MODULE_LANG', 'en_US.UTF-8') +DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True) +DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True) +DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user) +DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True) +DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None)) +DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root') +DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True) +DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True) +DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True) +DEFAULT_VAULT_PASSWORD_FILE = shell_expand_path(get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None)) +DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart') +DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True) +DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}') +DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER') +DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True) +DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True) +DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo') +DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H') +DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace') +DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None) +DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh') +DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', 'su') +DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True) +DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '') +DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root') +DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) +DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() + +DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '/usr/share/ansible_plugins/action_plugins') +DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '/usr/share/ansible_plugins/cache_plugins') +DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '/usr/share/ansible_plugins/callback_plugins') +DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '/usr/share/ansible_plugins/connection_plugins') +DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '/usr/share/ansible_plugins/lookup_plugins') +DEFAULT_VARS_PLUGIN_PATH = get_config(p, 
DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '/usr/share/ansible_plugins/vars_plugins') +DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '/usr/share/ansible_plugins/filter_plugins') +DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) + +CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') +CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None) +CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts') +CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True) + +ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True) +ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True) +ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True) +DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True) +DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True) +HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True) +SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, boolean=True) +DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, boolean=True) +DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True) +COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True) +DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) + +# CONNECTION RELATED +ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None) +ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") +ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True) +PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True) +# obsolete -- will be formally removed in 1.6 +ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True) +ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True) +ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True) +ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True) +ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True) +ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys') +ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700') +ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 
'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
+ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
+PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
+
+# characters included in auto-generated passwords
+DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
+
+# non-configurable things
+DEFAULT_SUDO_PASS = None
+DEFAULT_REMOTE_PASS = None
+DEFAULT_SUBSET = None
+DEFAULT_SU_PASS = None
+VAULT_VERSION_MIN = 1.0
+VAULT_VERSION_MAX = 1.0
diff --git a/v2/ansible/errors/__init__.py b/v2/ansible/errors/__init__.py
new file mode 100644
index 00000000000..7effe41df7c
--- /dev/null
+++ b/v2/ansible/errors/__init__.py
@@ -0,0 +1,169 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors.yaml_strings import *
+
+class AnsibleError(Exception):
+    '''
+    This is the base class for all errors raised from Ansible code,
+    and can be instantiated with two optional parameters beyond the
+    error message to control whether detailed information is displayed
+    when the error occurred while parsing a data file of some kind.
+
+    Usage:
+
+        raise AnsibleError('some message here', obj=obj, show_content=True)
+
+    Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject,
+    which should be returned by the DataLoader() class.
+    '''
+
+    def __init__(self, message, obj=None, show_content=True):
+        # we import this here to prevent an import loop problem,
+        # since the objects code also imports ansible.errors
+        from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
+
+        self._obj = obj
+        self._show_content = show_content
+        if obj and isinstance(obj, AnsibleBaseYAMLObject):
+            extended_error = self._get_extended_error()
+            if extended_error:
+                self.message = 'ERROR! %s\n\n%s' % (message, extended_error)
+            else:
+                # no extended detail was available, so fall back to the
+                # plain message rather than leaving self.message unset
+                self.message = 'ERROR! %s' % message
+        else:
+            self.message = 'ERROR! %s' % message
+
+    def __str__(self):
+        return self.message
+
+    def __repr__(self):
+        return self.message
+
+    def _get_error_lines_from_file(self, file_name, line_number):
+        '''
+        Returns the line in the file which corresponds to the reported error
+        location, as well as the line preceding it (if the error did not
+        occur on the first line), to provide context to the error.
+        '''
+
+        target_line = ''
+        prev_line = ''
+
+        with open(file_name, 'r') as f:
+            lines = f.readlines()
+
+            target_line = lines[line_number]
+            if line_number > 0:
+                prev_line = lines[line_number - 1]
+
+        return (target_line, prev_line)
+
+    def _get_extended_error(self):
+        '''
+        Given an object reporting the location of the exception in a file, return
+        detailed information regarding it including:
+
+          * the line which caused the error as well as the one preceding it
+          * causes and suggested remedies for common syntax errors
+
+        If this error was created with show_content=False, the reporting of content
+        is suppressed, as the file contents may be sensitive (i.e. vault data).
+        '''
+
+        error_message = ''
+
+        try:
+            (src_file, line_number, col_number) = self._obj.get_position_info()
+            error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
+            if src_file not in ('<string>', '<unicode>') and self._show_content:
+                (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
+                if target_line:
+                    stripped_line = target_line.replace(" ","")
+                    arrow_line = (" " * (col_number-1)) + "^ here"
+                    #header_line = ("=" * 73)
+                    error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)
+
+                    # common error/remediation checking here:
+                    # check for unquoted vars starting lines
+                    if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line and "'{{" not in target_line):
+                        error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR
+                    # check for common dictionary mistakes
+                    elif ":{{" in stripped_line and "}}" in stripped_line:
+                        error_message += YAML_COMMON_DICT_ERROR
+                    # check for common unquoted colon mistakes
+                    elif len(target_line) > 1 and len(target_line) > col_number and target_line[col_number] == ":" and target_line.count(':') > 1:
+                        error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
+                    # otherwise, check for some common quoting mistakes
+                    else:
+                        parts = target_line.split(":")
+                        if len(parts) > 1:
+                            middle = parts[1].strip()
+                            match = False
+                            unbalanced = False
+
+                            if middle.startswith("'") and not middle.endswith("'"):
+                                match = True
+                            elif middle.startswith('"') and not middle.endswith('"'):
+                                match = True
+
+                            if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and (target_line.count("'") > 2 or target_line.count('"') > 2):
+                                unbalanced = True
+
+                            if match:
+                                error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR
+                            if unbalanced:
+                                error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR
+
+        except (IOError, TypeError):
+            error_message += '\n(could not open file to display line)'
+        except IndexError:
+            error_message += '\n(specified line no longer in file, maybe it changed?)'
+
+        return error_message
+
+class AnsibleParserError(AnsibleError):
+    ''' something was detected early that is wrong about a playbook or data file '''
+    pass
+
+class AnsibleInternalError(AnsibleError):
+    ''' internal safeguards tripped, something happened in the code that should never happen '''
+    pass
+
+class AnsibleRuntimeError(AnsibleError):
+    ''' ansible had a problem while running a playbook '''
+    pass
+
+class AnsibleModuleError(AnsibleRuntimeError):
+    ''' a module failed somehow '''
+    pass
+
+class AnsibleConnectionFailure(AnsibleRuntimeError):
+    ''' the transport / connection_plugin had a fatal error '''
+    pass
+
+class AnsibleFilterError(AnsibleRuntimeError):
+    ''' a templating failure '''
+    pass
+
+class AnsibleUndefinedVariable(AnsibleRuntimeError):
+    ''' an undefined variable was encountered while templating '''
+    pass
diff --git a/v2/ansible/errors/yaml_strings.py b/v2/ansible/errors/yaml_strings.py
new file mode 100644
index 00000000000..dcd6ffd79fc
--- /dev/null
+++ b/v2/ansible/errors/yaml_strings.py
@@ -0,0 +1,118 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+__all__ = [
+    'YAML_SYNTAX_ERROR',
+    'YAML_POSITION_DETAILS',
+    'YAML_COMMON_DICT_ERROR',
+    'YAML_COMMON_UNQUOTED_VARIABLE_ERROR',
+    'YAML_COMMON_UNQUOTED_COLON_ERROR',
+    'YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR',
+    'YAML_COMMON_UNBALANCED_QUOTES_ERROR',
+]
+
+YAML_SYNTAX_ERROR = """\
+Syntax Error while loading YAML.
+"""
+
+YAML_POSITION_DETAILS = """\
+The error appears to have been in '%s': line %s, column %s, but may
+be elsewhere in the file depending on the exact syntax problem.
+"""
+
+YAML_COMMON_DICT_ERROR = """\
+This one looks easy to fix. YAML thought it was looking for the start of a
+hash/dictionary and was confused to see a second "{". Most likely this was
+meant to be an ansible template evaluation instead, so we have to give the
+parser a small hint that we wanted a string instead. The solution here is to
+just quote the entire value.
+
+For instance, if the original line was:
+
+    app_path: {{ base_path }}/foo
+
+It should be written as:
+
+    app_path: "{{ base_path }}/foo"
+"""
+
+YAML_COMMON_UNQUOTED_VARIABLE_ERROR = """\
+We could be wrong, but this one looks like it might be an issue with
+missing quotes. Always quote template expression brackets when they
+start a value. For instance:
+
+    with_items:
+      - {{ foo }}
+
+Should be written as:
+
+    with_items:
+      - "{{ foo }}"
+"""
+
+YAML_COMMON_UNQUOTED_COLON_ERROR = """\
+This one looks easy to fix. There seems to be an extra unquoted colon in the line
+and this is confusing the parser. It was only expecting to find one free
+colon. The solution is to just add some quotes around the colon, or quote the
+entire line after the first colon.
+
+For instance, if the original line was:
+
+    copy: src=file.txt dest=/path/filename:with_colon.txt
+
+It can be written as:
+
+    copy: src=file.txt dest='/path/filename:with_colon.txt'
+
+Or:
+
+    copy: 'src=file.txt dest=/path/filename:with_colon.txt'
+"""
+
+YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR = """\
+This one looks easy to fix. It seems that there is a value that starts
+with a quote, and the YAML parser is expecting to see the line end with
+the same kind of quote. For instance:
+
+    when: "ok" in result.stdout
+
+Could be written as:
+
+    when: '"ok" in result.stdout'
+
+Or equivalently:
+
+    when: "'ok' in result.stdout"
+"""
+
+YAML_COMMON_UNBALANCED_QUOTES_ERROR = """\
+We could be wrong, but this one looks like it might be an issue with
+unbalanced quotes. If starting a value with a quote, make sure the
+line ends with the same set of quotes.
For instance this arbitrary +example: + + foo: "bad" "wolf" + +Could be written as: + + foo: '"bad" "wolf"' +""" + diff --git a/v2/ansible/executor/__init__.py b/v2/ansible/executor/__init__.py new file mode 100644 index 00000000000..785fc459921 --- /dev/null +++ b/v2/ansible/executor/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py new file mode 100644 index 00000000000..7522ac210c2 --- /dev/null +++ b/v2/ansible/executor/connection_info.py @@ -0,0 +1,207 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pipes +import random + +from ansible import constants as C +from ansible.template import Templar +from ansible.utils.boolean import boolean + + +__all__ = ['ConnectionInformation'] + + +class ConnectionInformation: + + ''' + This class is used to consolidate the connection information for + hosts in a play and child tasks, where the task may override some + connection/authentication information. + ''' + + def __init__(self, play=None, options=None): + # FIXME: implement the new methodology here for supporting + # various different auth escalation methods (becomes, etc.) + + self.connection = C.DEFAULT_TRANSPORT + self.remote_addr = None + self.remote_user = 'root' + self.password = '' + self.port = 22 + self.private_key_file = None + self.su = False + self.su_user = '' + self.su_pass = '' + self.sudo = False + self.sudo_user = '' + self.sudo_pass = '' + self.verbosity = 0 + self.only_tags = set() + self.skip_tags = set() + + self.no_log = False + self.check_mode = False + + if play: + self.set_play(play) + + if options: + self.set_options(options) + + def __repr__(self): + value = "CONNECTION INFO:\n" + fields = self._get_fields() + fields.sort() + for field in fields: + value += "%20s : %s\n" % (field, getattr(self, field)) + return value + + def set_play(self, play): + ''' + Configures this connection information instance with data from + the play class. 
+ ''' + + if play.connection: + self.connection = play.connection + + self.remote_user = play.remote_user + self.password = '' + self.port = int(play.port) if play.port else 22 + self.su = play.su + self.su_user = play.su_user + self.su_pass = play.su_pass + self.sudo = play.sudo + self.sudo_user = play.sudo_user + self.sudo_pass = play.sudo_pass + + # non connection related + self.no_log = play.no_log + self.environment = play.environment + + def set_options(self, options): + ''' + Configures this connection information instance with data from + options specified by the user on the command line. These have a + higher precedence than those set on the play or host. + ''' + + # FIXME: set other values from options here? + + self.verbosity = options.verbosity + if options.connection: + self.connection = options.connection + + if options.check: + self.check_mode = boolean(options.check) + + # get the tag info from options, converting a comma-separated list + # of values into a proper list if need be. We check to see if the + # options have the attribute, as it is not always added via the CLI + if hasattr(options, 'tags'): + if isinstance(options.tags, list): + self.only_tags.update(options.tags) + elif isinstance(options.tags, basestring): + self.only_tags.update(options.tags.split(',')) + + if len(self.only_tags) == 0: + self.only_tags = set(['all']) + + if hasattr(options, 'skip_tags'): + if isinstance(options.skip_tags, list): + self.skip_tags.update(options.skip_tags) + elif isinstance(options.skip_tags, basestring): + self.skip_tags.update(options.skip_tags.split(',')) + + def copy(self, ci): + ''' + Copies the connection info from another connection info object, used + when merging in data from task overrides. + ''' + + for field in self._get_fields(): + value = getattr(ci, field, None) + if isinstance(value, dict): + setattr(self, field, value.copy()) + elif isinstance(value, set): + setattr(self, field, value.copy()) + elif isinstance(value, list): + setattr(self, field, value[:]) + else: + setattr(self, field, value) + + def set_task_override(self, task): + ''' + Sets attributes from the task if they are set, which will override + those from the play. + ''' + + new_info = ConnectionInformation() + new_info.copy(self) + + for attr in ('connection', 'remote_user', 'su', 'su_user', 'su_pass', 'sudo', 'sudo_user', 'sudo_pass', 'environment', 'no_log'): + if hasattr(task, attr): + attr_val = getattr(task, attr) + if attr_val: + setattr(new_info, attr, attr_val) + + return new_info + + def make_sudo_cmd(self, sudo_exe, executable, cmd): + """ + Helper function for wrapping commands with sudo. + + Rather than detect if sudo wants a password this time, -k makes + sudo always ask for a password if one is required. Passing a quoted + compound command to sudo (or sudo -s) directly doesn't work, so we + shellquote it with pipes.quote() and pass the quoted string to the + user's shell. We loop reading output until we see the randomly- + generated sudo prompt set with the -p option. 
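+
+        As a rough illustration only (the key and prompt portions are
+        generated randomly at run time), for sudo_user 'root', the default
+        '-H' sudo flags, and cmd '/bin/ls', the wrapped command looks like:
+
+            sudo -k && sudo -H -S -p "[sudo via ansible, key=...] password: " \
+                -u root $SHELL -c 'echo SUDO-SUCCESS-...; /bin/ls'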
+ """ + + randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) + prompt = '[sudo via ansible, key=%s] password: ' % randbits + success_key = 'SUDO-SUCCESS-%s' % randbits + + sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % ( + sudo_exe, sudo_exe, C.DEFAULT_SUDO_FLAGS, prompt, + self.sudo_user, executable or '$SHELL', + pipes.quote('echo %s; %s' % (success_key, cmd)) + ) + + # FIXME: old code, can probably be removed as it's been commented out for a while + #return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key) + return (sudocmd, prompt, success_key) + + def _get_fields(self): + return [i for i in self.__dict__.keys() if i[:1] != '_'] + + def post_validate(self, variables, loader): + ''' + Finalizes templated values which may be set on this objects fields. + ''' + + templar = Templar(loader=loader, variables=variables) + for field in self._get_fields(): + value = templar.template(getattr(self, field)) + setattr(self, field, value) + diff --git a/v2/ansible/executor/host_log.py b/v2/ansible/executor/host_log.py new file mode 100644 index 00000000000..495ad79f7d4 --- /dev/null +++ b/v2/ansible/executor/host_log.py @@ -0,0 +1,43 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +class HostLog: + + def __init__(self, host): + self.host = host + + def add_task_result(self, task_result): + pass + + def has_failures(self): + assert False + + def has_changes(self): + assert False + + def get_tasks(self, are_executed=None, are_changed=None, are_successful=None): + assert False + + def get_current_running_task(self) + # atomic decorator likely required? + assert False + + diff --git a/v2/ansible/executor/host_log_manager.py b/v2/ansible/executor/host_log_manager.py new file mode 100644 index 00000000000..727d06ce591 --- /dev/null +++ b/v2/ansible/executor/host_log_manager.py @@ -0,0 +1,29 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +class HostLogManager: + + def __init__(self): + pass + + def get_log_for_host(self, host): + assert False + diff --git a/v2/ansible/executor/manager.py b/v2/ansible/executor/manager.py new file mode 100644 index 00000000000..33a76e143b9 --- /dev/null +++ b/v2/ansible/executor/manager.py @@ -0,0 +1,66 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from multiprocessing.managers import SyncManager, BaseProxy +from ansible.playbook.handler import Handler +from ansible.playbook.task import Task +from ansible.playbook.play import Play +from ansible.errors import AnsibleError + +__all__ = ['AnsibleManager'] + + +class VariableManagerWrapper: + ''' + This class simply acts as a wrapper around the VariableManager class, + since manager proxies expect a new object to be returned rather than + any existing one. Using this wrapper, a shared proxy can be created + and an existing VariableManager class assigned to it, which can then + be accessed through the exposed proxy methods. + ''' + + def __init__(self): + self._vm = None + + def get_vars(self, loader, play=None, host=None, task=None): + return self._vm.get_vars(loader=loader, play=play, host=host, task=task) + + def set_variable_manager(self, vm): + self._vm = vm + + def set_host_variable(self, host, varname, value): + self._vm.set_host_variable(host, varname, value) + + def set_host_facts(self, host, facts): + self._vm.set_host_facts(host, facts) + +class AnsibleManager(SyncManager): + ''' + This is our custom manager class, which exists only so we may register + the new proxy below + ''' + pass + +AnsibleManager.register( + typeid='VariableManagerWrapper', + callable=VariableManagerWrapper, +) + diff --git a/v2/ansible/executor/module_common.py b/v2/ansible/executor/module_common.py new file mode 100644 index 00000000000..9f878fb6b02 --- /dev/null +++ b/v2/ansible/executor/module_common.py @@ -0,0 +1,187 @@ +# (c) 2013-2014, Michael DeHaan +# (c) 2015 Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
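The proxy registration used by AnsibleManager above can be exercised on its own; a toy sketch (ToyManager and Wrapper are hypothetical names) of the same SyncManager pattern, where the manager hands back a proxy whose method calls are forwarded to the real object in the manager process:

    from multiprocessing.managers import SyncManager

    class Wrapper(object):
        def __init__(self):
            self._value = None
        def set_value(self, v):
            self._value = v
        def get_value(self):
            return self._value

    class ToyManager(SyncManager):
        pass

    ToyManager.register(typeid='Wrapper', callable=Wrapper)

    if __name__ == '__main__':
        manager = ToyManager()
        manager.start()
        proxy = manager.Wrapper()   # a proxy, not the object itself
        proxy.set_value(42)
        print(proxy.get_value())    # 42
        manager.shutdown()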
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# from python and deps
+from cStringIO import StringIO
+import json
+import os
+import shlex
+
+# from Ansible
+from ansible import __version__
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.parsing.utils.jsonify import jsonify
+
+REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
+REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
+REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
+REPLACER_WINDOWS = "# POWERSHELL_COMMON"
+REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
+
+# We could end up writing out parameters with unicode characters so we need to
+# specify an encoding for the python source file
+ENCODING_STRING = '# -*- coding: utf-8 -*-'
+
+# we've moved the module_common relative to the snippets, so fix the path
+_SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
+
+# ******************************************************************************
+
+def _slurp(path):
+    if not os.path.exists(path):
+        raise AnsibleError("imported module support code does not exist at %s" % path)
+    fd = open(path)
+    data = fd.read()
+    fd.close()
+    return data
+
+def _find_snippet_imports(module_data, module_path, strip_comments):
+    """
+    Given the source of the module, convert it to a Jinja2 template to insert
+    module code and return whether it's a new or old style module.
+    """
+
+    module_style = 'old'
+    if REPLACER in module_data:
+        module_style = 'new'
+    elif 'from ansible.module_utils.' in module_data:
+        module_style = 'new'
+    elif 'WANT_JSON' in module_data:
+        module_style = 'non_native_want_json'
+
+    output = StringIO()
+    lines = module_data.split('\n')
+    snippet_names = []
+
+    for line in lines:
+
+        if REPLACER in line:
+            output.write(_slurp(os.path.join(_SNIPPET_PATH, "basic.py")))
+            snippet_names.append('basic')
+        if REPLACER_WINDOWS in line:
+            ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1"))
+            output.write(ps_data)
+            snippet_names.append('powershell')
+        elif line.startswith('from ansible.module_utils.'):
+            tokens = line.split(".")
+            import_error = False
+            if len(tokens) != 3:
+                import_error = True
+            if " import *" not in line:
+                import_error = True
+            if import_error:
+                raise AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
+            snippet_name = tokens[2].split()[0]
+            snippet_names.append(snippet_name)
+            output.write(_slurp(os.path.join(_SNIPPET_PATH, snippet_name + ".py")))
+        else:
+            if strip_comments and (line.startswith("#") or line == ''):
+                # skip comments and blank lines when stripping is requested
+                continue
+            output.write(line)
+            output.write("\n")
+
+    if not module_path.endswith(".ps1"):
+        # Unixy modules
+        if len(snippet_names) > 0 and not 'basic' in snippet_names:
+            raise AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
+    else:
+        # Windows modules
+        if len(snippet_names) > 0 and not 'powershell' in snippet_names:
+            raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)
+
+    return (output.getvalue(), module_style)
+
+# ******************************************************************************
+
+def modify_module(module_path, module_args, strip_comments=False):
+    """
+    Used to insert chunks of code into modules before transfer rather than
+    doing regular python imports. This allows for more efficient transfer in
+    a non-bootstrapping scenario by not moving extra files over the wire and
+    also takes care of embedding arguments in the transferred modules.
+ + This version is done in such a way that local imports can still be + used in the module code, so IDEs don't have to be aware of what is going on. + + Example: + + from ansible.module_utils.basic import * + + ... will result in the insertion of basic.py into the module + from the module_utils/ directory in the source tree. + + All modules are required to import at least basic, though there will also + be other snippets. + + For powershell, there's equivalent conventions like this: + + # POWERSHELL_COMMON + + which results in the inclusion of the common code from powershell.ps1 + + """ + + with open(module_path) as f: + + # read in the module source + module_data = f.read() + + (module_data, module_style) = _find_snippet_imports(module_data, module_path, strip_comments) + + #module_args_json = jsonify(module_args) + module_args_json = json.dumps(module_args) + encoded_args = repr(module_args_json.encode('utf-8')) + + # these strings should be part of the 'basic' snippet which is required to be included + module_data = module_data.replace(REPLACER_VERSION, repr(__version__)) + module_data = module_data.replace(REPLACER_COMPLEX, encoded_args) + + # FIXME: we're not passing around an inject dictionary anymore, so + # this needs to be fixed with whatever method we use for vars + # like this moving forward + #if module_style == 'new': + # facility = C.DEFAULT_SYSLOG_FACILITY + # if 'ansible_syslog_facility' in inject: + # facility = inject['ansible_syslog_facility'] + # module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility) + + lines = module_data.split("\n", 1) + shebang = None + if lines[0].startswith("#!"): + shebang = lines[0].strip() + args = shlex.split(str(shebang[2:])) + interpreter = args[0] + interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter) + + # FIXME: more inject stuff here... + #if interpreter_config in inject: + # lines[0] = shebang = "#!%s %s" % (inject[interpreter_config], " ".join(args[1:])) + + lines.insert(1, ENCODING_STRING) + else: + lines.insert(0, ENCODING_STRING) + + module_data = "\n".join(lines) + + return (module_data, module_style, shebang) + diff --git a/v2/ansible/executor/play_iterator.py b/v2/ansible/executor/play_iterator.py new file mode 100644 index 00000000000..f1d8914f84f --- /dev/null +++ b/v2/ansible/executor/play_iterator.py @@ -0,0 +1,269 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
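The core substitution that _find_snippet_imports() performs reduces to a few lines; a simplified standalone sketch (the splice() helper is hypothetical, and the real function also tracks module style, snippet names, and comment stripping):

    def splice(module_src, snippet_src):
        out = []
        for line in module_src.splitlines():
            if line.startswith('from ansible.module_utils.basic import'):
                # inline the snippet body instead of shipping a second file
                out.append(snippet_src)
            else:
                out.append(line)
        return '\n'.join(out)

    module = "#!/usr/bin/python\nfrom ansible.module_utils.basic import *\nmain()"
    print(splice(module, "# ... contents of module_utils/basic.py ..."))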
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import *
+from ansible.playbook.task import Task
+
+from ansible.utils.boolean import boolean
+
+__all__ = ['PlayIterator']
+
+
+# the primary running states for the play iteration
+ITERATING_SETUP = 0
+ITERATING_TASKS = 1
+ITERATING_RESCUE = 2
+ITERATING_ALWAYS = 3
+ITERATING_COMPLETE = 4
+
+# the failure states for the play iteration
+FAILED_NONE = 0
+FAILED_SETUP = 1
+FAILED_TASKS = 2
+FAILED_RESCUE = 3
+FAILED_ALWAYS = 4
+
+class PlayState:
+
+    '''
+    A helper class, which keeps track of the task iteration
+    state for a given playbook. This is used in the PlayIterator
+    class on a per-host basis.
+    '''
+
+    # FIXME: this class is the representation of a finite state machine,
+    #        so we really should have a well defined state representation
+    #        documented somewhere...
+
+    def __init__(self, parent_iterator, host):
+        '''
+        Create the initial state, which tracks the running state as well
+        as the failure state, which are used when executing block branches
+        (rescue/always)
+        '''
+
+        self._parent_iterator = parent_iterator
+        self._run_state = ITERATING_SETUP
+        self._failed_state = FAILED_NONE
+        self._task_list = parent_iterator._play.compile()
+        self._gather_facts = parent_iterator._play.gather_facts
+        self._host = host
+
+        self._cur_block = None
+        self._cur_role = None
+        self._cur_task_pos = 0
+        self._cur_rescue_pos = 0
+        self._cur_always_pos = 0
+        self._cur_handler_pos = 0
+
+    def next(self, peek=False):
+        '''
+        Determines and returns the next available task from the playbook,
+        advancing through the list of plays as it goes. If peek is set to True,
+        the internal state is not stored.
+        '''
+
+        task = None
+
+        # save this locally so that we can peek at the next task
+        # without updating the internal state of the iterator
+        run_state = self._run_state
+        failed_state = self._failed_state
+        cur_block = self._cur_block
+        cur_role = self._cur_role
+        cur_task_pos = self._cur_task_pos
+        cur_rescue_pos = self._cur_rescue_pos
+        cur_always_pos = self._cur_always_pos
+        cur_handler_pos = self._cur_handler_pos
+
+
+        while True:
+            if run_state == ITERATING_SETUP:
+                if failed_state == FAILED_SETUP:
+                    run_state = ITERATING_COMPLETE
+                else:
+                    run_state = ITERATING_TASKS
+
+                    if (self._gather_facts == 'smart' and not self._host.gathered_facts) or boolean(self._gather_facts):
+                        self._host.set_gathered_facts(True)
+                        task = Task()
+                        # FIXME: this is not the best way to get this...
+                        task.set_loader(self._parent_iterator._play._loader)
+                        task.action = 'setup'
+                        break
+            elif run_state == ITERATING_TASKS:
+                # if there is any failure state besides FAILED_NONE, we should
+                # change to some other running state
+                if failed_state != FAILED_NONE or cur_task_pos > len(self._task_list) - 1:
+                    # if there is a block (and there always should be), start running
+                    # the rescue portion if it exists (and if we haven't failed that
+                    # already), or the always portion (if it exists and we didn't fail
+                    # there too). Otherwise, we're done iterating.
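+                    #
+                    # As an overview (annotation only, nothing here is executed),
+                    # the state walk mirrors a try/rescue/always construct:
+                    #
+                    #     try:                # ITERATING_TASKS
+                    #         <block tasks>
+                    #     except:             # ITERATING_RESCUE, on FAILED_TASKS
+                    #         <rescue tasks>
+                    #     finally:            # ITERATING_ALWAYS
+                    #         <always tasks>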
+                    if cur_block:
+                        if failed_state != FAILED_RESCUE and cur_block.rescue:
+                            run_state = ITERATING_RESCUE
+                            cur_rescue_pos = 0
+                        elif failed_state != FAILED_ALWAYS and cur_block.always:
+                            run_state = ITERATING_ALWAYS
+                            cur_always_pos = 0
+                        else:
+                            run_state = ITERATING_COMPLETE
+                    else:
+                        run_state = ITERATING_COMPLETE
+                else:
+                    task = self._task_list[cur_task_pos]
+                    if cur_block is not None and cur_block != task._block:
+                        run_state = ITERATING_ALWAYS
+                        continue
+                    else:
+                        cur_block = task._block
+                    cur_task_pos += 1
+
+                    # Break out of the while loop now that we have our task
+                    break
+
+            elif run_state == ITERATING_RESCUE:
+                # If we're iterating through the rescue tasks, make sure we haven't
+                # failed yet. If so, move on to the always block or if not get the
+                # next rescue task (if one exists)
+                if failed_state == FAILED_RESCUE or cur_block.rescue is None or cur_rescue_pos > len(cur_block.rescue) - 1:
+                    run_state = ITERATING_ALWAYS
+                else:
+                    task = cur_block.rescue[cur_rescue_pos]
+                    cur_rescue_pos += 1
+                    break
+
+            elif run_state == ITERATING_ALWAYS:
+                # If we're iterating through the always tasks, make sure we haven't
+                # failed yet. If so, we're done iterating otherwise get the next always
+                # task (if one exists)
+                if failed_state == FAILED_ALWAYS or cur_block.always is None or cur_always_pos > len(cur_block.always) - 1:
+                    cur_block = None
+                    if failed_state == FAILED_ALWAYS or cur_task_pos > len(self._task_list) - 1:
+                        run_state = ITERATING_COMPLETE
+                    else:
+                        run_state = ITERATING_TASKS
+                else:
+                    task = cur_block.always[cur_always_pos]
+                    cur_always_pos += 1
+                    break
+
+            elif run_state == ITERATING_COMPLETE:
+                # done iterating, return None to signify that
+                return None
+
+        if task._role:
+            # if we had a current role, mark that role as completed
+            if cur_role and task._role != cur_role and not peek:
+                cur_role._completed = True
+
+            cur_role = task._role
+
+            # if the current role has not had its task run flag set, clear
+            # the completed flag so we can correctly determine if the role
+            # was run
+            if not cur_role._had_task_run and not peek:
+                cur_role._completed = False
+
+        # If we're not just peeking at the next task, save the internal state
+        if not peek:
+            self._run_state = run_state
+            self._failed_state = failed_state
+            self._cur_block = cur_block
+            self._cur_role = cur_role
+            self._cur_task_pos = cur_task_pos
+            self._cur_rescue_pos = cur_rescue_pos
+            self._cur_always_pos = cur_always_pos
+            self._cur_handler_pos = cur_handler_pos
+
+        return task
+
+    def mark_failed(self):
+        '''
+        Escalates the failed state relative to the running state.
+        '''
+        if self._run_state == ITERATING_SETUP:
+            self._failed_state = FAILED_SETUP
+        elif self._run_state == ITERATING_TASKS:
+            self._failed_state = FAILED_TASKS
+        elif self._run_state == ITERATING_RESCUE:
+            self._failed_state = FAILED_RESCUE
+        elif self._run_state == ITERATING_ALWAYS:
+            self._failed_state = FAILED_ALWAYS
+
+
+class PlayIterator:
+
+    '''
+    The main iterator class, which keeps the state of the playbook
+    on a per-host basis using the above PlayState class.
+    '''
+
+    def __init__(self, inventory, play):
+        self._play = play
+        self._inventory = inventory
+        self._host_entries = dict()
+        self._first_host = None
+
+        # Build the per-host dictionary of playbook states, using a copy
+        # of the play object so we can post_validate it to ensure any templated
+        # fields are filled in without modifying the original object, since
+        # post_validate() saves the templated values.
+ + # FIXME: this is a hacky way of doing this, the iterator should + # instead get the loader and variable manager directly + # as args to __init__ + all_vars = inventory._variable_manager.get_vars(loader=inventory._loader, play=play) + new_play = play.copy() + new_play.post_validate(all_vars, fail_on_undefined=False) + + for host in inventory.get_hosts(new_play.hosts): + if self._first_host is None: + self._first_host = host + self._host_entries[host.get_name()] = PlayState(parent_iterator=self, host=host) + + # FIXME: remove, probably not required anymore + #def get_next_task(self, peek=False): + # ''' returns the next task for host[0] ''' + # + # first_entry = self._host_entries[self._first_host.get_name()] + # if not peek: + # for entry in self._host_entries: + # if entry != self._first_host.get_name(): + # target_entry = self._host_entries[entry] + # if target_entry._cur_task_pos == first_entry._cur_task_pos: + # target_entry.next() + # return first_entry.next(peek=peek) + + def get_next_task_for_host(self, host, peek=False): + ''' fetch the next task for the given host ''' + if host.get_name() not in self._host_entries: + raise AnsibleError("invalid host (%s) specified for playbook iteration" % host) + + return self._host_entries[host.get_name()].next(peek=peek) + + def mark_host_failed(self, host): + ''' mark the given host as failed ''' + if host.get_name() not in self._host_entries: + raise AnsibleError("invalid host (%s) specified for playbook iteration" % host) + + self._host_entries[host.get_name()].mark_failed() + diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py new file mode 100644 index 00000000000..88ec05b9e86 --- /dev/null +++ b/v2/ansible/executor/playbook_executor.py @@ -0,0 +1,131 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import signal + +from ansible import constants as C +from ansible.errors import * +from ansible.executor.task_queue_manager import TaskQueueManager +from ansible.playbook import Playbook + +from ansible.utils.debug import debug + +class PlaybookExecutor: + + ''' + This is the primary class for executing playbooks, and thus the + basis for bin/ansible-playbook operation. + ''' + + def __init__(self, playbooks, inventory, variable_manager, loader, options): + self._playbooks = playbooks + self._inventory = inventory + self._variable_manager = variable_manager + self._loader = loader + self._options = options + + self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, options=options) + + def run(self): + + ''' + Run the given playbook, based on the settings in the play which + may limit the runs to serialized groups, etc. 
+ ''' + + signal.signal(signal.SIGINT, self._cleanup) + + result = 0 + try: + for playbook_path in self._playbooks: + pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader) + + # FIXME: playbook entries are just plays, so we should rename them + for play in pb.get_entries(): + self._inventory.remove_restriction() + + # Create a temporary copy of the play here, so we can run post_validate + # on it without the templating changes affecting the original object. + all_vars = self._variable_manager.get_vars(loader=self._loader, play=play) + new_play = play.copy() + new_play.post_validate(all_vars, fail_on_undefined=False) + + for batch in self._get_serialized_batches(new_play): + if len(batch) == 0: + self._tqm._callback.playbook_on_play_start(new_play.name) + self._tqm._callback.playbook_on_no_hosts_matched() + result = 0 + break + # restrict the inventory to the hosts in the serialized batch + self._inventory.restrict_to_hosts(batch) + # and run it... + result = self._tqm.run(play=play) + if result != 0: + break + + if result != 0: + # FIXME: do something here, to signify the playbook execution failed + self._cleanup() + return result + except: + self._cleanup() + raise + + self._cleanup() + return result + + def _cleanup(self, signum=None, framenum=None): + return self._tqm.cleanup() + + def _get_serialized_batches(self, play): + ''' + Returns a list of hosts, subdivided into batches based on + the serial size specified in the play. + ''' + + # make sure we have a unique list of hosts + all_hosts = self._inventory.get_hosts(play.hosts) + + # check to see if the serial number was specified as a percentage, + # and convert it to an integer value based on the number of hosts + if isinstance(play.serial, basestring) and play.serial.endswith('%'): + serial_pct = int(play.serial.replace("%","")) + serial = int((serial_pct/100.0) * len(all_hosts)) + else: + serial = int(play.serial) + + # if the serial count was not specified or is invalid, default to + # a list of all hosts, otherwise split the list of hosts into chunks + # which are based on the serial size + if serial <= 0: + return [all_hosts] + else: + serialized_batches = [] + + while len(all_hosts) > 0: + play_hosts = [] + for x in range(serial): + if len(all_hosts) > 0: + play_hosts.append(all_hosts.pop(0)) + + serialized_batches.append(play_hosts) + + return serialized_batches diff --git a/v2/ansible/executor/process/__init__.py b/v2/ansible/executor/process/__init__.py new file mode 100644 index 00000000000..785fc459921 --- /dev/null +++ b/v2/ansible/executor/process/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
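The serial/percentage rule in _get_serialized_batches() above can be compressed into a standalone helper; a minimal sketch (the batches() name is hypothetical, and the real code also de-duplicates hosts via the inventory):

    def batches(hosts, serial):
        # accept either an int or a percentage string like '50%'
        if isinstance(serial, str) and serial.endswith('%'):
            serial = int((int(serial[:-1]) / 100.0) * len(hosts))
        serial = int(serial)
        if serial <= 0:
            return [hosts]
        return [hosts[i:i + serial] for i in range(0, len(hosts), serial)]

    print(batches(['a', 'b', 'c', 'd', 'e'], 2))    # [['a', 'b'], ['c', 'd'], ['e']]
    print(batches(['a', 'b', 'c', 'd'], '50%'))     # [['a', 'b'], ['c', 'd']]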
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/executor/process/result.py b/v2/ansible/executor/process/result.py new file mode 100644 index 00000000000..b9e54df9dc1 --- /dev/null +++ b/v2/ansible/executor/process/result.py @@ -0,0 +1,170 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import Queue +import multiprocessing +import os +import signal +import sys +import time +import traceback + +HAS_ATFORK=True +try: + from Crypto.Random import atfork +except ImportError: + HAS_ATFORK=False + +from ansible.playbook.handler import Handler +from ansible.playbook.task import Task + +from ansible.utils.debug import debug + +__all__ = ['ResultProcess'] + + +class ResultProcess(multiprocessing.Process): + ''' + The result worker thread, which reads results from the results + queue and fires off callbacks/etc. as necessary. + ''' + + def __init__(self, final_q, workers): + + # takes a task queue manager as the sole param: + self._final_q = final_q + self._workers = workers + self._cur_worker = 0 + self._terminated = False + + super(ResultProcess, self).__init__() + + def _send_result(self, result): + debug("sending result: %s" % (result,)) + self._final_q.put(result, block=False) + debug("done sending result") + + def _read_worker_result(self): + result = None + starting_point = self._cur_worker + while True: + (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker] + self._cur_worker += 1 + if self._cur_worker >= len(self._workers): + self._cur_worker = 0 + + try: + if not rslt_q.empty(): + debug("worker %d has data to read" % self._cur_worker) + result = rslt_q.get(block=False) + debug("got a result from worker %d: %s" % (self._cur_worker, result)) + break + except Queue.Empty: + pass + + if self._cur_worker == starting_point: + break + + return result + + def terminate(self): + self._terminated = True + super(ResultProcess, self).terminate() + + def run(self): + ''' + The main thread execution, which reads from the results queue + indefinitely and sends callbacks/etc. when results are received. + ''' + + if HAS_ATFORK: + atfork() + + while True: + try: + result = self._read_worker_result() + if result is None: + time.sleep(0.1) + continue + + host_name = result._host.get_name() + + # send callbacks, execute other options based on the result status + # FIXME: this should all be cleaned up and probably moved to a sub-function. + # the fact that this sometimes sends a TaskResult and other times + # sends a raw dictionary back may be confusing, but the result vs. 
+ # results implementation for tasks with loops should be cleaned up + # better than this + if result.is_unreachable(): + self._send_result(('host_unreachable', result)) + elif result.is_failed(): + self._send_result(('host_task_failed', result)) + elif result.is_skipped(): + self._send_result(('host_task_skipped', result)) + else: + # if this task is notifying a handler, do it now + if result._task.notify: + # The shared dictionary for notified handlers is a proxy, which + # does not detect when sub-objects within the proxy are modified. + # So, per the docs, we reassign the list so the proxy picks up and + # notifies all other threads + for notify in result._task.notify: + self._send_result(('notify_handler', result._host, notify)) + + if result._task.loop: + # this task had a loop, and has more than one result, so + # loop over all of them instead of a single result + result_items = result._result['results'] + else: + result_items = [ result._result ] + + for result_item in result_items: + if 'add_host' in result_item: + # this task added a new host (add_host module) + self._send_result(('add_host', result_item)) + elif 'add_group' in result_item: + # this task added a new group (group_by module) + self._send_result(('add_group', result._host, result_item)) + elif 'ansible_facts' in result_item: + # if this task is registering facts, do that now + if result._task.action in ('set_fact', 'include_vars'): + for (key, value) in result_item['ansible_facts'].iteritems(): + self._send_result(('set_host_var', result._host, key, value)) + else: + self._send_result(('set_host_facts', result._host, result_item['ansible_facts'])) + + # finally, send the ok for this task + self._send_result(('host_task_ok', result)) + + # if this task is registering a result, do it now + if result._task.register: + self._send_result(('set_host_var', result._host, result._task.register, result._result)) + + except Queue.Empty: + pass + except (KeyboardInterrupt, IOError, EOFError): + break + except: + # FIXME: we should probably send a proper callback here instead of + # simply dumping a stack trace on the screen + traceback.print_exc() + break + diff --git a/v2/ansible/executor/process/worker.py b/v2/ansible/executor/process/worker.py new file mode 100644 index 00000000000..bf5ee8c93f0 --- /dev/null +++ b/v2/ansible/executor/process/worker.py @@ -0,0 +1,158 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
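The round-robin scan in _read_worker_result() above can be seen in miniature; a standalone sketch (the read_one() helper is hypothetical, with plain lists standing in for the per-worker multiprocessing queues), which services workers fairly and gives up after one full lap when nothing is ready:

    def read_one(queues, start):
        cur = start
        while True:
            q = queues[cur]
            cur = (cur + 1) % len(queues)
            if q:
                return q.pop(0), cur
            if cur == start:
                return None, cur

    queues = [[], ['result-from-worker-1'], []]
    result, next_start = read_one(queues, 0)
    print(result)   # result-from-worker-1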
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import Queue
+import multiprocessing
+import os
+import signal
+import sys
+import time
+import traceback
+
+HAS_ATFORK=True
+try:
+    from Crypto.Random import atfork
+except ImportError:
+    HAS_ATFORK=False
+
+from ansible.errors import AnsibleError, AnsibleConnectionFailure
+from ansible.executor.task_executor import TaskExecutor
+from ansible.executor.task_result import TaskResult
+from ansible.playbook.handler import Handler
+from ansible.playbook.task import Task
+
+from ansible.utils.debug import debug
+
+__all__ = ['WorkerProcess']
+
+
+class WorkerProcess(multiprocessing.Process):
+    '''
+    The worker thread class, which uses TaskExecutor to run tasks
+    read from a job queue and pushes results into a results queue
+    for reading later.
+    '''
+
+    def __init__(self, tqm, main_q, rslt_q, loader, new_stdin):
+
+        # takes the queues and loader handed off by the task queue manager
+        self._main_q = main_q
+        self._rslt_q = rslt_q
+        self._loader = loader
+
+        # dupe stdin, if we have one
+        try:
+            fileno = sys.stdin.fileno()
+        except ValueError:
+            fileno = None
+
+        self._new_stdin = new_stdin
+        if not new_stdin and fileno is not None:
+            try:
+                self._new_stdin = os.fdopen(os.dup(fileno))
+            except OSError, e:
+                # couldn't dupe stdin, most likely because it's
+                # not a valid file descriptor, so we just rely on
+                # using the one that was passed in
+                pass
+
+        if self._new_stdin:
+            sys.stdin = self._new_stdin
+
+        super(WorkerProcess, self).__init__()
+
+    def run(self):
+        '''
+        Called when the process is started, and loops indefinitely
+        until an error is encountered (typically an IOError from the
+        queue pipe being disconnected). During the loop, we attempt
+        to pull tasks off the job queue and run them, pushing the result
+        onto the results queue. We also remove the host from the blocked
+        hosts list, to signify that they are ready for their next task.
+        '''
+
+        if HAS_ATFORK:
+            atfork()
+
+        while True:
+            task = None
+            try:
+                if not self._main_q.empty():
+                    debug("there's work to be done!")
+                    (host, task, basedir, job_vars, connection_info, module_loader) = self._main_q.get(block=False)
+                    debug("got a task/handler to work on: %s" % task)
+
+                    # because the task queue manager starts workers (forks) before the
+                    # playbook is loaded, set the basedir of the loader inherited by
+                    # this fork now so that we can find files correctly
+                    self._loader.set_basedir(basedir)
+
+                    # Serializing/deserializing tasks does not preserve the loader attribute,
+                    # since it is passed to the worker during the forking of the process and
+                    # would be wasteful to serialize. So we set it here on the task now, and
+                    # the task handles updating parent/child objects as needed.
+ task.set_loader(self._loader) + + # apply the given task's information to the connection info, + # which may override some fields already set by the play or + # the options specified on the command line + new_connection_info = connection_info.set_task_override(task) + + # execute the task and build a TaskResult from the result + debug("running TaskExecutor() for %s/%s" % (host, task)) + executor_result = TaskExecutor(host, task, job_vars, new_connection_info, self._loader, module_loader).run() + debug("done running TaskExecutor() for %s/%s" % (host, task)) + task_result = TaskResult(host, task, executor_result) + + # put the result on the result queue + debug("sending task result") + self._rslt_q.put(task_result, block=False) + debug("done sending task result") + + else: + time.sleep(0.1) + + except Queue.Empty: + pass + except (IOError, EOFError, KeyboardInterrupt): + break + except AnsibleConnectionFailure: + try: + if task: + task_result = TaskResult(host, task, dict(unreachable=True)) + self._rslt_q.put(task_result, block=False) + except: + # FIXME: most likely an abort, catch those kinds of errors specifically + break + except Exception, e: + debug("WORKER EXCEPTION: %s" % e) + debug("WORKER EXCEPTION: %s" % traceback.format_exc()) + try: + if task: + task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout='')) + self._rslt_q.put(task_result, block=False) + except: + # FIXME: most likely an abort, catch those kinds of errors specifically + break + + debug("WORKER PROCESS EXITING") + + diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py new file mode 100644 index 00000000000..91631aebb50 --- /dev/null +++ b/v2/ansible/executor/task_executor.py @@ -0,0 +1,391 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible import constants as C +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.executor.connection_info import ConnectionInformation +from ansible.playbook.conditional import Conditional +from ansible.playbook.task import Task +from ansible.plugins import lookup_loader, connection_loader, action_loader +from ansible.utils.listify import listify_lookup_plugin_terms + +from ansible.utils.debug import debug + +__all__ = ['TaskExecutor'] + +import json +import time + +class TaskExecutor: + + ''' + This is the main worker class for the executor pipeline, which + handles loading an action plugin to actually dispatch the task to + a given host. This class roughly corresponds to the old Runner() + class. 
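+
+    The rough per-task flow is: resolve any loop items, evaluate the task's
+    conditional, post-validate templated fields, and then dispatch the task
+    to its action plugin over the chosen connection, honoring the task's
+    retries/until settings on the result.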
+    '''
+
+    def __init__(self, host, task, job_vars, connection_info, loader, module_loader):
+        self._host = host
+        self._task = task
+        self._job_vars = job_vars
+        self._connection_info = connection_info
+        self._loader = loader
+        self._module_loader = module_loader
+
+    def run(self):
+        '''
+        The main executor entrypoint, where we determine if the specified
+        task requires looping and either runs the task with the loop items
+        or executes it directly.
+        '''
+
+        debug("in run()")
+
+        try:
+            # lookup plugins need to know if this task is executing from
+            # a role, so that it can properly find files/templates/etc.
+            roledir = None
+            if self._task._role:
+                roledir = self._task._role._role_path
+            self._job_vars['roledir'] = roledir
+
+            items = self._get_loop_items()
+            if items is not None:
+                if len(items) > 0:
+                    item_results = self._run_loop(items)
+                    res = dict(results=item_results)
+                else:
+                    res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
+            else:
+                debug("calling self._execute()")
+                res = self._execute()
+                debug("_execute() done")
+
+            # make sure changed is set in the result, if it's not present
+            if 'changed' not in res:
+                res['changed'] = False
+
+            debug("dumping result to json")
+            result = json.dumps(res)
+            debug("done dumping result, returning")
+            return result
+        except AnsibleError, e:
+            return dict(failed=True, msg=str(e))
+
+    def _get_loop_items(self):
+        '''
+        Loads a lookup plugin to handle the with_* portion of a task (if specified),
+        and returns the items result.
+        '''
+
+        items = None
+        if self._task.loop and self._task.loop in lookup_loader:
+            loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, variables=self._job_vars, loader=self._loader)
+            items = lookup_loader.get(self._task.loop, loader=self._loader).run(terms=loop_terms, variables=self._job_vars)
+
+        return items
+
+    def _run_loop(self, items):
+        '''
+        Runs the task with the loop items specified and collates the result
+        into an array named 'results' which is inserted into the final result
+        along with the item for which the loop ran.
+        '''
+
+        results = []
+
+        # make copies of the job vars and task so we can add the item to
+        # the variables and re-validate the task with the item variable
+        task_vars = self._job_vars.copy()
+
+        items = self._squash_items(items, task_vars)
+        for item in items:
+            task_vars['item'] = item
+
+            try:
+                tmp_task = self._task.copy()
+            except AnsibleParserError, e:
+                results.append(dict(failed=True, msg=str(e)))
+                continue
+
+            # now we swap the internal task with the copy, execute,
+            # and swap them back so we can do the next iteration cleanly
+            (self._task, tmp_task) = (tmp_task, self._task)
+            res = self._execute(variables=task_vars)
+            (self._task, tmp_task) = (tmp_task, self._task)
+
+            # FIXME: we should be sending back a callback result for each item in the loop here
+
+            # now update the result with the item info, and append the result
+            # to the list of results
+            res['item'] = item
+            results.append(res)
+
+        return results
+
+    def _squash_items(self, items, variables):
+        '''
+        Squash items down to a comma-separated list for certain modules which support it
+        (typically package management modules).
+        '''
+
+        if len(items) > 0 and self._task.action in ('apt', 'yum', 'pkgng', 'zypper'):
+            final_items = []
+            for item in items:
+                variables['item'] = item
+                if self._task.evaluate_conditional(variables):
+                    final_items.append(item)
+            return [",".join(final_items)]
+        else:
+            return items
+
+    def _execute(self, variables=None):
+        '''
+        The primary workhorse of the executor system, this runs the task
+        on the specified host (which may be the delegated_to host) and handles
+        the retry/until and block rescue/always execution
+        '''
+
+        if variables is None:
+            variables = self._job_vars
+
+        # fields set from the play/task may be based on variables, so we have to
+        # do the same kind of post validation step on it here before we use it
+        self._connection_info.post_validate(variables=variables, loader=self._loader)
+
+        # get the connection and the handler for this execution
+        self._connection = self._get_connection(variables)
+        self._handler = self._get_action_handler(connection=self._connection)
+
+        # Evaluate the conditional (if any) for this task, which we do before running
+        # the final task post-validation. We do this before the post validation due to
+        # the fact that the conditional may specify that the task be skipped due to a
+        # variable not being present which would otherwise cause validation to fail
+        if not self._task.evaluate_conditional(variables):
+            debug("when evaluation failed, skipping this task")
+            return dict(changed=False, skipped=True, skip_reason='Conditional check failed')
+
+        # Now we do final validation on the task, which sets all fields to their final values
+        self._task.post_validate(variables)
+
+        # And filter out any fields which were set to default(omit), and got the omit token value
+        omit_token = variables.get('omit')
+        if omit_token is not None:
+            self._task.args = dict(filter(lambda x: x[1] != omit_token, self._task.args.iteritems()))
+
+        # Read some values from the task, so that we can modify them if need be
+        retries = self._task.retries
+        if retries <= 0:
+            retries = 1
+
+        delay = self._task.delay
+        if delay < 0:
+            delay = 1
+
+        # make a copy of the job vars here, in case we need to update them
+        # with the registered variable value later on when testing conditions
+        vars_copy = variables.copy()
+
+        debug("starting attempt loop")
+        result = None
+        for attempt in range(retries):
+            if attempt > 0:
+                # FIXME: this should use the callback/message passing mechanism
+                print("FAILED - RETRYING: %s (%d retries left)" % (self._task, retries-attempt))
+
+            debug("running the handler")
+            result = self._handler.run(task_vars=variables)
+            debug("handler run complete")
+            if attempt > 0:
+                result['attempts'] = attempt + 1
+
+            if self._task.async > 0:
+                # the async_wrapper module returns dumped JSON via its stdout
+                # response, so we parse it here and replace the result
+                try:
+                    result = json.loads(result.get('stdout'))
+                except ValueError, e:
+                    return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e))
+
+                if self._task.poll > 0:
+                    result = self._poll_async_result(result=result)
+
+            # update the local copy of vars with the registered value, if specified
+            if self._task.register:
+                vars_copy[self._task.register] = result
+
+            # create a conditional object to evaluate task conditions
+            cond = Conditional(loader=self._loader)
+
+            # FIXME: make sure until is mutually exclusive with changed_when/failed_when
+            if self._task.until:
+                cond.when = self._task.until
+                if cond.evaluate_conditional(vars_copy):
+                    break
+            elif (self._task.changed_when or self._task.failed_when) and 'skipped' not in result:
+                if self._task.changed_when:
+                    cond.when = [ self._task.changed_when ]
+                    result['changed'] = cond.evaluate_conditional(vars_copy)
+                if self._task.failed_when:
+                    cond.when = [ self._task.failed_when ]
+                    failed_when_result = cond.evaluate_conditional(vars_copy)
+                    result['failed_when_result'] = result['failed'] = failed_when_result
+                    if failed_when_result:
+                        break
+            elif 'failed' not in result and result.get('rc', 0) == 0:
+                # if the result is not failed, stop trying
+                break
+
+            if attempt < retries - 1:
+                time.sleep(delay)
+
+        debug("attempt loop complete, returning result")
+        return result
+
+    def _poll_async_result(self, result):
+        '''
+        Polls for the specified JID to be complete
+        '''
+
+        async_jid = result.get('ansible_job_id')
+        if async_jid is None:
+            return dict(failed=True, msg="No job id was returned by the async task")
+
+        # Create a new pseudo-task to run the async_status module, and run
+        # that (with a sleep for "poll" seconds between each retry) until the
+        # async time limit is exceeded.
+
+        async_task = Task().load(dict(action='async_status jid=%s' % async_jid))
+
+        # Because this is an async task, the action handler is async. However,
+        # we need the 'normal' action handler for the status check, so get it
+        # now via the action_loader
+        normal_handler = action_loader.get(
+            'normal',
+            task=async_task,
+            connection=self._connection,
+            connection_info=self._connection_info,
+            loader=self._loader,
+            module_loader=self._module_loader,
+        )
+
+        time_left = self._task.async
+        while time_left > 0:
+            time.sleep(self._task.poll)
+
+            async_result = normal_handler.run()
+            if int(async_result.get('finished', 0)) == 1 or 'failed' in async_result or 'skipped' in async_result:
+                break
+
+            time_left -= self._task.poll
+
+        if int(async_result.get('finished', 0)) != 1:
+            return dict(failed=True, msg="async task did not complete within the requested time")
+        else:
+            return async_result
+
+    def _get_connection(self, variables):
+        '''
+        Reads the connection property for the host, and returns the
+        correct connection object from the list of connection plugins
+        '''
+
+        # FIXME: delegate_to calculation should be done here
+        # FIXME: calculation of connection params/auth stuff should be done here
+
+        self._connection_info.remote_addr = self._host.ipv4_address
+        if self._task.delegate_to is not None:
+            self._compute_delegate(variables)
+
+        # FIXME: add all port/connection type munging here (accelerated mode,
+        #        fixing up options for ssh, etc.)? and 'smart' conversion
+        conn_type = self._connection_info.connection
+        if conn_type == 'smart':
+            conn_type = 'ssh'
+
+        connection = connection_loader.get(conn_type, self._connection_info)
+        if not connection:
+            raise AnsibleError("the connection plugin '%s' was not found" % conn_type)
+
+        connection.connect()
+
+        return connection
+
+    def _get_action_handler(self, connection):
+        '''
+        Returns the correct action plugin to handle the requested task action
+        '''
+
+        if self._task.action in action_loader:
+            if self._task.async != 0:
+                raise AnsibleError("async mode is not supported with the %s module" % self._task.action)
+            handler_name = self._task.action
+        elif self._task.async == 0:
+            handler_name = 'normal'
+        else:
+            handler_name = 'async'
+
+        handler = action_loader.get(
+            handler_name,
+            task=self._task,
+            connection=connection,
+            connection_info=self._connection_info,
+            loader=self._loader,
+            module_loader=self._module_loader,
+        )
+        if not handler:
+            raise AnsibleError("the handler '%s' was not found" % handler_name)
+
+        return handler
+
+    def _compute_delegate(self, variables):
+
+        # get the vars for the delegate by its name
+        try:
+            this_info = variables['hostvars'][self._task.delegate_to]
+        except:
+            # make sure the inject is empty for non-inventory hosts
+            this_info = {}
+
+        # get the real ssh_address for the delegate and allow ansible_ssh_host to be templated
+        #self._connection_info.remote_user = self._compute_delegate_user(self.delegate_to, delegate['inject'])
+        self._connection_info.remote_addr = this_info.get('ansible_ssh_host', self._task.delegate_to)
+        self._connection_info.port = this_info.get('ansible_ssh_port', self._connection_info.port)
+        self._connection_info.password = this_info.get('ansible_ssh_pass', self._connection_info.password)
+        self._connection_info.private_key_file = this_info.get('ansible_ssh_private_key_file', self._connection_info.private_key_file)
+        self._connection_info.connection = this_info.get('ansible_connection', self._connection_info.connection)
+        self._connection_info.sudo_pass = this_info.get('ansible_sudo_pass', self._connection_info.sudo_pass)
+
+        if self._connection_info.remote_addr in ('127.0.0.1', 'localhost'):
+            self._connection_info.connection = 'local'
+
+        # Last chance to get private_key_file from global variables.
+        # this is useful if delegated host is not defined in the inventory
+        #if delegate['private_key_file'] is None:
+        #    delegate['private_key_file'] = remote_inject.get('ansible_ssh_private_key_file', None)
+
+        #if delegate['private_key_file'] is not None:
+        #    delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file'])
+
+        for i in this_info:
+            if i.startswith("ansible_") and i.endswith("_interpreter"):
+                variables[i] = this_info[i]
+
diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py
new file mode 100644
index 00000000000..72ff04d53dc
--- /dev/null
+++ b/v2/ansible/executor/task_queue_manager.py
@@ -0,0 +1,209 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import multiprocessing +import os +import socket +import sys + +from ansible.errors import AnsibleError +from ansible.executor.connection_info import ConnectionInformation +#from ansible.executor.manager import AnsibleManager +from ansible.executor.play_iterator import PlayIterator +from ansible.executor.process.worker import WorkerProcess +from ansible.executor.process.result import ResultProcess +from ansible.plugins import callback_loader, strategy_loader + +from ansible.utils.debug import debug + +__all__ = ['TaskQueueManager'] + + +class TaskQueueManager: + + ''' + This class handles the multiprocessing requirements of Ansible by + creating a pool of worker forks, a result handler fork, and a + manager object with shared datastructures/queues for coordinating + work between all processes. + + The queue manager is responsible for loading the play strategy plugin, + which dispatches the Play's tasks to hosts. + ''' + + def __init__(self, inventory, callback, variable_manager, loader, options): + + self._inventory = inventory + self._variable_manager = variable_manager + self._loader = loader + self._options = options + + # a special flag to help us exit cleanly + self._terminated = False + + # create and start the multiprocessing manager + #self._manager = AnsibleManager() + #self._manager.start() + + # this dictionary is used to keep track of notified handlers + self._notified_handlers = dict() + + # dictionaries to keep track of failed/unreachable hosts + self._failed_hosts = dict() + self._unreachable_hosts = dict() + + self._final_q = multiprocessing.Queue() + + # FIXME: hard-coded the default callback plugin here, which + # should be configurable. + self._callback = callback_loader.get(callback) + + # create the pool of worker threads, based on the number of forks specified + try: + fileno = sys.stdin.fileno() + except ValueError: + fileno = None + + self._workers = [] + for i in range(self._options.forks): + # duplicate stdin, if possible + new_stdin = None + if fileno is not None: + try: + new_stdin = os.fdopen(os.dup(fileno)) + except OSError, e: + # couldn't dupe stdin, most likely because it's + # not a valid file descriptor, so we just rely on + # using the one that was passed in + pass + + main_q = multiprocessing.Queue() + rslt_q = multiprocessing.Queue() + + prc = WorkerProcess(self, main_q, rslt_q, loader, new_stdin) + prc.start() + + self._workers.append((prc, main_q, rslt_q)) + + self._result_prc = ResultProcess(self._final_q, self._workers) + self._result_prc.start() + + def _initialize_notified_handlers(self, handlers): + ''' + Clears and initializes the shared notified handlers dict with entries + for each handler in the play, which is an empty array that will contain + inventory hostnames for those hosts triggering the handler. + ''' + + # Zero the dictionary first by removing any entries there. + # Proxied dicts don't support iteritems, so we have to use keys() + for key in self._notified_handlers.keys(): + del self._notified_handlers[key] + + # FIXME: there is a block compile helper for this... 
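+        # (descriptive note) each element of 'handlers' is a block object; its
+        # compile() method flattens the block into the individual handler tasks
+        # it contains, so the loop below builds one flat list of handlers whose
+        # names then seed the shared notified-handlers dict.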
+        handler_list = []
+        for handler_block in handlers:
+            handler_list.extend(handler_block.compile())
+
+        # then initialize it with the handler names from the handler list
+        for handler in handler_list:
+            self._notified_handlers[handler.get_name()] = []
+
+    def run(self, play):
+        '''
+        Iterates over the roles/tasks in a play, using the given (or default)
+        strategy for queueing tasks. The default is the linear strategy, which
+        operates like classic Ansible by keeping all hosts in lock-step with
+        a given task (meaning no hosts move on to the next task until all hosts
+        are done with the current task).
+        '''
+
+        connection_info = ConnectionInformation(play, self._options)
+        self._callback.set_connection_info(connection_info)
+
+        # run final validation on the play now, to make sure fields are templated
+        # FIXME: is this even required? Everything is validated and merged at the
+        #        task level, so nothing else in the play needs to be templated
+        #all_vars = self._vmw.get_vars(loader=self._dlw, play=play)
+        #all_vars = self._vmw.get_vars(loader=self._loader, play=play)
+        #play.post_validate(all_vars=all_vars)
+
+        self._callback.playbook_on_play_start(play.name)
+
+        # initialize the shared dictionary containing the notified handlers
+        self._initialize_notified_handlers(play.handlers)
+
+        # load the specified strategy (or the default linear one)
+        strategy = strategy_loader.get(play.strategy, self)
+        if strategy is None:
+            raise AnsibleError("Invalid play strategy specified: %s" % play.strategy, obj=play._ds)
+
+        # build the iterator
+        iterator = PlayIterator(inventory=self._inventory, play=play)
+
+        # and run the play using the strategy
+        return strategy.run(iterator, connection_info)
+
+    def cleanup(self):
+        debug("RUNNING CLEANUP")
+
+        self.terminate()
+
+        self._final_q.close()
+        self._result_prc.terminate()
+
+        for (worker_prc, main_q, rslt_q) in self._workers:
+            rslt_q.close()
+            main_q.close()
+            worker_prc.terminate()
+
+    def get_inventory(self):
+        return self._inventory
+
+    def get_callback(self):
+        return self._callback
+
+    def get_variable_manager(self):
+        return self._variable_manager
+
+    def get_loader(self):
+        return self._loader
+
+    def get_server_pipe(self):
+        return self._server_pipe
+
+    def get_client_pipe(self):
+        return self._client_pipe
+
+    def get_pending_results(self):
+        return self._pending_results
+
+    def get_allow_processing(self):
+        return self._allow_processing
+
+    def get_notified_handlers(self):
+        return self._notified_handlers
+
+    def get_workers(self):
+        return self._workers[:]
+
+    def terminate(self):
+        self._terminated = True
diff --git a/v2/ansible/executor/task_result.py b/v2/ansible/executor/task_result.py
new file mode 100644
index 00000000000..2b760bac003
--- /dev/null
+++ b/v2/ansible/executor/task_result.py
@@ -0,0 +1,61 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
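For orientation, the fork-plus-queues pattern used by TaskQueueManager and WorkerProcess above reduces to a minimal standalone sketch; the names here (worker, the fake job tuples) are hypothetical illustrations of the shape, not the real Ansible API:

    import multiprocessing

    def worker(main_q, rslt_q):
        # mirrors WorkerProcess.run(): pull a job, "execute" it, push a result
        while True:
            job = main_q.get()
            if job is None:              # sentinel: shut down cleanly
                break
            host, task = job
            rslt_q.put((host, task, dict(changed=False)))

    if __name__ == '__main__':
        main_q, rslt_q = multiprocessing.Queue(), multiprocessing.Queue()
        workers = [multiprocessing.Process(target=worker, args=(main_q, rslt_q))
                   for _ in range(2)]
        for w in workers:
            w.start()
        for job in [('host1', 'ping'), ('host2', 'ping')]:
            main_q.put(job)
        for _ in workers:
            main_q.put(None)             # one sentinel per worker
        for _ in range(2):
            print(rslt_q.get())          # one result tuple per job
        for w in workers:
            w.join()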
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.parsing import DataLoader
+
+class TaskResult:
+    '''
+    This class is responsible for interpreting the resulting data
+    from an executed task, and provides helper methods for determining
+    the result of a given task.
+    '''
+
+    def __init__(self, host, task, return_data):
+        self._host = host
+        self._task = task
+        if isinstance(return_data, dict):
+            self._result = return_data.copy()
+        else:
+            self._result = DataLoader().load(return_data)
+
+    def is_changed(self):
+        return self._check_key('changed')
+
+    def is_skipped(self):
+        return self._check_key('skipped')
+
+    def is_failed(self):
+        if 'failed_when_result' in self._result:
+            return self._check_key('failed_when_result')
+        else:
+            return self._check_key('failed') or self._result.get('rc', 0) != 0
+
+    def is_unreachable(self):
+        return self._check_key('unreachable')
+
+    def _check_key(self, key):
+        if 'results' in self._result:
+            flag = False
+            for res in self._result.get('results', []):
+                if isinstance(res, dict):
+                    flag |= res.get(key, False)
+            return flag
+        else:
+            return self._result.get(key, False)
diff --git a/v2/ansible/inventory/__init__.py b/v2/ansible/inventory/__init__.py
new file mode 100644
index 00000000000..c8e3cddebaa
--- /dev/null
+++ b/v2/ansible/inventory/__init__.py
@@ -0,0 +1,669 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+#############################################
+import fnmatch
+import os
+import sys
+import re
+import stat
+import subprocess
+
+from ansible import constants as C
+from ansible.errors import *
+
+from ansible.inventory.ini import InventoryParser
+from ansible.inventory.script import InventoryScript
+from ansible.inventory.dir import InventoryDirectory
+from ansible.inventory.group import Group
+from ansible.inventory.host import Host
+from ansible.plugins import vars_loader
+from ansible.utils.path import is_executable
+from ansible.utils.vars import combine_vars
+
+class Inventory(object):
+    """
+    Host inventory for ansible.
+    """
+
+    #__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
+    #              'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
+    #              '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
+
+    def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST):
+
+        # the host file, or script path, or list of hosts
+        # if a list, inventory data will NOT be loaded
+        self.host_list = host_list
+        self._loader = loader
+        self._variable_manager = variable_manager
+
+        # caching to avoid repeated calculations, particularly with
+        # external inventory scripts.
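+        # (descriptive note) roughly: _vars_per_host and _vars_per_group memoize
+        # computed variables by name, _hosts_cache memoizes Host object lookups,
+        # _groups_list memoizes the group-name -> hostnames mapping, and
+        # _pattern_cache memoizes resolved host patterns.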
+ + self._vars_per_host = {} + self._vars_per_group = {} + self._hosts_cache = {} + self._groups_list = {} + self._pattern_cache = {} + + # to be set by calling set_playbook_basedir by playbook code + self._playbook_basedir = None + + # the inventory object holds a list of groups + self.groups = [] + + # a list of host(names) to contain current inquiries to + self._restriction = None + self._also_restriction = None + self._subset = None + + if isinstance(host_list, basestring): + if "," in host_list: + host_list = host_list.split(",") + host_list = [ h for h in host_list if h and h.strip() ] + + if host_list is None: + self.parser = None + elif isinstance(host_list, list): + self.parser = None + all = Group('all') + self.groups = [ all ] + ipv6_re = re.compile('\[([a-f:A-F0-9]*[%[0-z]+]?)\](?::(\d+))?') + for x in host_list: + m = ipv6_re.match(x) + if m: + all.add_host(Host(m.groups()[0], m.groups()[1])) + else: + if ":" in x: + tokens = x.rsplit(":", 1) + # if there is ':' in the address, then this is an ipv6 + if ':' in tokens[0]: + all.add_host(Host(x)) + else: + all.add_host(Host(tokens[0], tokens[1])) + else: + all.add_host(Host(x)) + elif os.path.exists(host_list): + if os.path.isdir(host_list): + # Ensure basedir is inside the directory + self.host_list = os.path.join(self.host_list, "") + self.parser = InventoryDirectory(loader=self._loader, filename=host_list) + self.groups = self.parser.groups.values() + else: + # check to see if the specified file starts with a + # shebang (#!/), so if an error is raised by the parser + # class we can show a more apropos error + shebang_present = False + try: + inv_file = open(host_list) + first_line = inv_file.readlines()[0] + inv_file.close() + if first_line.startswith('#!'): + shebang_present = True + except: + pass + + if is_executable(host_list): + try: + self.parser = InventoryScript(loader=self._loader, filename=host_list) + self.groups = self.parser.groups.values() + except: + if not shebang_present: + raise errors.AnsibleError("The file %s is marked as executable, but failed to execute correctly. " % host_list + \ + "If this is not supposed to be an executable script, correct this with `chmod -x %s`." % host_list) + else: + raise + else: + try: + self.parser = InventoryParser(filename=host_list) + self.groups = self.parser.groups.values() + except: + if shebang_present: + raise errors.AnsibleError("The file %s looks like it should be an executable inventory script, but is not marked executable. " % host_list + \ + "Perhaps you want to correct this with `chmod +x %s`?" 
% host_list) + else: + raise + + vars_loader.add_directory(self.basedir(), with_subdir=True) + else: + raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?") + + self._vars_plugins = [ x for x in vars_loader.all(self) ] + + # FIXME: shouldn't be required, since the group/host vars file + # management will be done in VariableManager + # get group vars from group_vars/ files and vars plugins + for group in self.groups: + # FIXME: combine_vars + group.vars = combine_vars(group.vars, self.get_group_variables(group.name)) + + # get host vars from host_vars/ files and vars plugins + for host in self.get_hosts(): + # FIXME: combine_vars + host.vars = combine_vars(host.vars, self.get_host_variables(host.name)) + + + def _match(self, str, pattern_str): + try: + if pattern_str.startswith('~'): + return re.search(pattern_str[1:], str) + else: + return fnmatch.fnmatch(str, pattern_str) + except Exception, e: + raise errors.AnsibleError('invalid host pattern: %s' % pattern_str) + + def _match_list(self, items, item_attr, pattern_str): + results = [] + try: + if not pattern_str.startswith('~'): + pattern = re.compile(fnmatch.translate(pattern_str)) + else: + pattern = re.compile(pattern_str[1:]) + except Exception, e: + raise errors.AnsibleError('invalid host pattern: %s' % pattern_str) + + for item in items: + if pattern.match(getattr(item, item_attr)): + results.append(item) + return results + + def get_hosts(self, pattern="all"): + """ + find all host names matching a pattern string, taking into account any inventory restrictions or + applied subsets. + """ + + # process patterns + if isinstance(pattern, list): + pattern = ';'.join(pattern) + patterns = pattern.replace(";",":").split(":") + hosts = self._get_hosts(patterns) + + # exclude hosts not in a subset, if defined + if self._subset: + subset = self._get_hosts(self._subset) + hosts = [ h for h in hosts if h in subset ] + + # exclude hosts mentioned in any restriction (ex: failed hosts) + if self._restriction is not None: + hosts = [ h for h in hosts if h in self._restriction ] + if self._also_restriction is not None: + hosts = [ h for h in hosts if h in self._also_restriction ] + + return hosts + + def _get_hosts(self, patterns): + """ + finds hosts that match a list of patterns. Handles negative + matches as well as intersection matches. + """ + + # Host specifiers should be sorted to ensure consistent behavior + pattern_regular = [] + pattern_intersection = [] + pattern_exclude = [] + for p in patterns: + if p.startswith("!"): + pattern_exclude.append(p) + elif p.startswith("&"): + pattern_intersection.append(p) + elif p: + pattern_regular.append(p) + + # if no regular pattern was given, hence only exclude and/or intersection + # make that magically work + if pattern_regular == []: + pattern_regular = ['all'] + + # when applying the host selectors, run those without the "&" or "!" + # first, then the &s, then the !s. 
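+        # (worked example) for patterns ['webservers', '&staging', '!web3']:
+        # 'webservers' seeds the candidate list below, '&staging' then keeps
+        # only candidates also in staging, and '!web3' subtracts that host.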
+ patterns = pattern_regular + pattern_intersection + pattern_exclude + + hosts = [] + + for p in patterns: + # avoid resolving a pattern that is a plain host + if p in self._hosts_cache: + hosts.append(self.get_host(p)) + else: + that = self.__get_hosts(p) + if p.startswith("!"): + hosts = [ h for h in hosts if h not in that ] + elif p.startswith("&"): + hosts = [ h for h in hosts if h in that ] + else: + to_append = [ h for h in that if h.name not in [ y.name for y in hosts ] ] + hosts.extend(to_append) + return hosts + + def __get_hosts(self, pattern): + """ + finds hosts that positively match a particular pattern. Does not + take into account negative matches. + """ + + if pattern in self._pattern_cache: + return self._pattern_cache[pattern] + + (name, enumeration_details) = self._enumeration_info(pattern) + hpat = self._hosts_in_unenumerated_pattern(name) + result = self._apply_ranges(pattern, hpat) + self._pattern_cache[pattern] = result + return result + + def _enumeration_info(self, pattern): + """ + returns (pattern, limits) taking a regular pattern and finding out + which parts of it correspond to start/stop offsets. limits is + a tuple of (start, stop) or None + """ + + # Do not parse regexes for enumeration info + if pattern.startswith('~'): + return (pattern, None) + + # The regex used to match on the range, which can be [x] or [x-y]. + pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$") + m = pattern_re.match(pattern) + if m: + (target, first, last, rest) = m.groups() + first = int(first) + if last: + if first < 0: + raise errors.AnsibleError("invalid range: negative indices cannot be used as the first item in a range") + last = int(last) + else: + last = first + return (target, (first, last)) + else: + return (pattern, None) + + def _apply_ranges(self, pat, hosts): + """ + given a pattern like foo, that matches hosts, return all of hosts + given a pattern like foo[0:5], where foo matches hosts, return the first 6 hosts + """ + + # If there are no hosts to select from, just return the + # empty set. This prevents trying to do selections on an empty set. 
+ # issue#6258 + if not hosts: + return hosts + + (loose_pattern, limits) = self._enumeration_info(pat) + if not limits: + return hosts + + (left, right) = limits + + if left == '': + left = 0 + if right == '': + right = 0 + left=int(left) + right=int(right) + try: + if left != right: + return hosts[left:right] + else: + return [ hosts[left] ] + except IndexError: + raise errors.AnsibleError("no hosts matching the pattern '%s' were found" % pat) + + def _create_implicit_localhost(self, pattern): + new_host = Host(pattern) + new_host.set_variable("ansible_python_interpreter", sys.executable) + new_host.set_variable("ansible_connection", "local") + new_host.ipv4_address = '127.0.0.1' + + ungrouped = self.get_group("ungrouped") + if ungrouped is None: + self.add_group(Group('ungrouped')) + ungrouped = self.get_group('ungrouped') + self.get_group('all').add_child_group(ungrouped) + ungrouped.add_host(new_host) + return new_host + + def _hosts_in_unenumerated_pattern(self, pattern): + """ Get all host names matching the pattern """ + + results = [] + hosts = [] + hostnames = set() + + # ignore any negative checks here, this is handled elsewhere + pattern = pattern.replace("!","").replace("&", "") + + def __append_host_to_results(host): + if host not in results and host.name not in hostnames: + hostnames.add(host.name) + results.append(host) + + groups = self.get_groups() + for group in groups: + if pattern == 'all': + for host in group.get_hosts(): + __append_host_to_results(host) + else: + if self._match(group.name, pattern): + for host in group.get_hosts(): + __append_host_to_results(host) + else: + matching_hosts = self._match_list(group.get_hosts(), 'name', pattern) + for host in matching_hosts: + __append_host_to_results(host) + + if pattern in ["localhost", "127.0.0.1"] and len(results) == 0: + new_host = self._create_implicit_localhost(pattern) + results.append(new_host) + return results + + def clear_pattern_cache(self): + ''' called exclusively by the add_host plugin to allow patterns to be recalculated ''' + self._pattern_cache = {} + + def groups_for_host(self, host): + if host in self._hosts_cache: + return self._hosts_cache[host].get_groups() + else: + return [] + + def groups_list(self): + if not self._groups_list: + groups = {} + for g in self.groups: + groups[g.name] = [h.name for h in g.get_hosts()] + ancestors = g.get_ancestors() + for a in ancestors: + if a.name not in groups: + groups[a.name] = [h.name for h in a.get_hosts()] + self._groups_list = groups + return self._groups_list + + def get_groups(self): + return self.groups + + def get_host(self, hostname): + if hostname not in self._hosts_cache: + self._hosts_cache[hostname] = self._get_host(hostname) + return self._hosts_cache[hostname] + + def _get_host(self, hostname): + if hostname in ['localhost','127.0.0.1']: + for host in self.get_group('all').get_hosts(): + if host.name in ['localhost', '127.0.0.1']: + return host + return self._create_implicit_localhost(hostname) + else: + for group in self.groups: + for host in group.get_hosts(): + if hostname == host.name: + return host + return None + + def get_group(self, groupname): + for group in self.groups: + if group.name == groupname: + return group + return None + + def get_group_variables(self, groupname, update_cached=False, vault_password=None): + if groupname not in self._vars_per_group or update_cached: + self._vars_per_group[groupname] = self._get_group_variables(groupname, vault_password=vault_password) + return self._vars_per_group[groupname] + + def 
_get_group_variables(self, groupname, vault_password=None): + + group = self.get_group(groupname) + if group is None: + raise Exception("group not found: %s" % groupname) + + vars = {} + + # plugin.get_group_vars retrieves just vars for specific group + vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')] + for updated in vars_results: + if updated is not None: + # FIXME: combine_vars + vars = combine_vars(vars, updated) + + # Read group_vars/ files + # FIXME: combine_vars + vars = combine_vars(vars, self.get_group_vars(group)) + + return vars + + def get_vars(self, hostname, update_cached=False, vault_password=None): + + host = self.get_host(hostname) + if not host: + raise Exception("host not found: %s" % hostname) + return host.get_vars() + + def get_host_variables(self, hostname, update_cached=False, vault_password=None): + + if hostname not in self._vars_per_host or update_cached: + self._vars_per_host[hostname] = self._get_host_variables(hostname, vault_password=vault_password) + return self._vars_per_host[hostname] + + def _get_host_variables(self, hostname, vault_password=None): + + host = self.get_host(hostname) + if host is None: + raise errors.AnsibleError("host not found: %s" % hostname) + + vars = {} + + # plugin.run retrieves all vars (also from groups) for host + vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')] + for updated in vars_results: + if updated is not None: + # FIXME: combine_vars + vars = combine_vars(vars, updated) + + # plugin.get_host_vars retrieves just vars for specific host + vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')] + for updated in vars_results: + if updated is not None: + # FIXME: combine_vars + vars = combine_vars(vars, updated) + + # still need to check InventoryParser per host vars + # which actually means InventoryScript per host, + # which is not performant + if self.parser is not None: + # FIXME: combine_vars + vars = combine_vars(vars, self.parser.get_host_variables(host)) + + # Read host_vars/ files + # FIXME: combine_vars + vars = combine_vars(vars, self.get_host_vars(host)) + + return vars + + def add_group(self, group): + if group.name not in self.groups_list(): + self.groups.append(group) + self._groups_list = None # invalidate internal cache + else: + raise errors.AnsibleError("group already in inventory: %s" % group.name) + + def list_hosts(self, pattern="all"): + + """ return a list of hostnames for a pattern """ + + result = [ h for h in self.get_hosts(pattern) ] + if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]: + result = [pattern] + return result + + def list_groups(self): + return sorted([ g.name for g in self.groups ], key=lambda x: x) + + def restrict_to_hosts(self, restriction): + """ + Restrict list operations to the hosts given in restriction. This is used + to exclude failed hosts in main playbook code, don't use this for other + reasons. + """ + if not isinstance(restriction, list): + restriction = [ restriction ] + self._restriction = restriction + + def also_restrict_to(self, restriction): + """ + Works like restict_to but offers an additional restriction. Playbooks use this + to implement serial behavior. 
+ """ + if not isinstance(restriction, list): + restriction = [ restriction ] + self._also_restriction = restriction + + def subset(self, subset_pattern): + """ + Limits inventory results to a subset of inventory that matches a given + pattern, such as to select a given geographic of numeric slice amongst + a previous 'hosts' selection that only select roles, or vice versa. + Corresponds to --limit parameter to ansible-playbook + """ + if subset_pattern is None: + self._subset = None + else: + subset_pattern = subset_pattern.replace(',',':') + subset_pattern = subset_pattern.replace(";",":").split(":") + results = [] + # allow Unix style @filename data + for x in subset_pattern: + if x.startswith("@"): + fd = open(x[1:]) + results.extend(fd.read().split("\n")) + fd.close() + else: + results.append(x) + self._subset = results + + def remove_restriction(self): + """ Do not restrict list operations """ + self._restriction = None + + def lift_also_restriction(self): + """ Clears the also restriction """ + self._also_restriction = None + + def is_file(self): + """ did inventory come from a file? """ + if not isinstance(self.host_list, basestring): + return False + return os.path.exists(self.host_list) + + def basedir(self): + """ if inventory came from a file, what's the directory? """ + if not self.is_file(): + return None + dname = os.path.dirname(self.host_list) + if dname is None or dname == '' or dname == '.': + cwd = os.getcwd() + return os.path.abspath(cwd) + return os.path.abspath(dname) + + def src(self): + """ if inventory came from a file, what's the directory and file name? """ + if not self.is_file(): + return None + return self.host_list + + def playbook_basedir(self): + """ returns the directory of the current playbook """ + return self._playbook_basedir + + def set_playbook_basedir(self, dir): + """ + sets the base directory of the playbook so inventory can use it as a + basedir for host_ and group_vars, and other things. + """ + # Only update things if dir is a different playbook basedir + if dir != self._playbook_basedir: + self._playbook_basedir = dir + # get group vars from group_vars/ files + for group in self.groups: + # FIXME: combine_vars + group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True)) + # get host vars from host_vars/ files + for host in self.get_hosts(): + # FIXME: combine_vars + host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True)) + # invalidate cache + self._vars_per_host = {} + self._vars_per_group = {} + + def get_host_vars(self, host, new_pb_basedir=False): + """ Read host_vars/ files """ + return self._get_hostgroup_vars(host=host, group=None, new_pb_basedir=new_pb_basedir) + + def get_group_vars(self, group, new_pb_basedir=False): + """ Read group_vars/ files """ + return self._get_hostgroup_vars(host=None, group=group, new_pb_basedir=new_pb_basedir) + + def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False): + """ + Loads variables from group_vars/ and host_vars/ in directories parallel + to the inventory base directory or in the same directory as the playbook. Variables in the playbook + dir will win over the inventory dir if files are in both. 
+ """ + + results = {} + scan_pass = 0 + _basedir = self.basedir() + + # look in both the inventory base directory and the playbook base directory + # unless we do an update for a new playbook base dir + if not new_pb_basedir: + basedirs = [_basedir, self._playbook_basedir] + else: + basedirs = [self._playbook_basedir] + + for basedir in basedirs: + + # this can happen from particular API usages, particularly if not run + # from /usr/bin/ansible-playbook + if basedir is None: + continue + + scan_pass = scan_pass + 1 + + # it's not an eror if the directory does not exist, keep moving + if not os.path.exists(basedir): + continue + + # save work of second scan if the directories are the same + if _basedir == self._playbook_basedir and scan_pass != 1: + continue + + # FIXME: these should go to VariableManager + if group and host is None: + # load vars in dir/group_vars/name_of_group + base_path = os.path.join(basedir, "group_vars/%s" % group.name) + self._variable_manager.add_group_vars_file(base_path, self._loader) + elif host and group is None: + # same for hostvars in dir/host_vars/name_of_host + base_path = os.path.join(basedir, "host_vars/%s" % host.name) + self._variable_manager.add_host_vars_file(base_path, self._loader) + + # all done, results is a dictionary of variables for this particular host. + return results + diff --git a/v2/ansible/inventory/dir.py b/v2/ansible/inventory/dir.py new file mode 100644 index 00000000000..52f7af8b53f --- /dev/null +++ b/v2/ansible/inventory/dir.py @@ -0,0 +1,234 @@ +# (c) 2013, Daniel Hokka Zakrisson +# (c) 2014, Serge van Ginderachter +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +############################################# + +import os + +from ansible import constants as C +from ansible.errors import * + +from ansible.inventory.host import Host +from ansible.inventory.group import Group +from ansible.inventory.ini import InventoryParser +from ansible.inventory.script import InventoryScript +from ansible.utils.path import is_executable +from ansible.utils.vars import combine_vars + +class InventoryDirectory(object): + ''' Host inventory parser for ansible using a directory of inventories. 
+    '''
+
+    def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
+        self.names = os.listdir(filename)
+        self.names.sort()
+        self.directory = filename
+        self.parsers = []
+        self.hosts = {}
+        self.groups = {}
+
+        self._loader = loader
+
+        for i in self.names:
+
+            # Skip files that end with certain extensions or characters
+            if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo")):
+                continue
+            # Skip hidden files
+            if i.startswith('.') and not i.startswith('./'):
+                continue
+            # These are things inside of an inventory basedir
+            if i in ("host_vars", "group_vars", "vars_plugins"):
+                continue
+            fullpath = os.path.join(self.directory, i)
+            if os.path.isdir(fullpath):
+                parser = InventoryDirectory(loader=loader, filename=fullpath)
+            elif is_executable(fullpath):
+                parser = InventoryScript(loader=loader, filename=fullpath)
+            else:
+                parser = InventoryParser(filename=fullpath)
+            self.parsers.append(parser)
+
+            # retrieve all groups and hosts from the parser and add them to
+            # self, don't look at group lists yet, to avoid
+            # recursion trouble, but just make sure all objects exist in self
+            newgroups = parser.groups.values()
+            for group in newgroups:
+                for host in group.hosts:
+                    self._add_host(host)
+            for group in newgroups:
+                self._add_group(group)
+
+            # now check the objects lists so they contain only objects from
+            # self; membership data in groups is already fine (except all &
+            # ungrouped, see later), but might still reference objects not in self
+            for group in self.groups.values():
+                # iterate on a copy of the lists, as those lists get changed in
+                # the loop
+                # list with group's child group objects:
+                for child in group.child_groups[:]:
+                    if child != self.groups[child.name]:
+                        group.child_groups.remove(child)
+                        group.child_groups.append(self.groups[child.name])
+                # list with group's parent group objects:
+                for parent in group.parent_groups[:]:
+                    if parent != self.groups[parent.name]:
+                        group.parent_groups.remove(parent)
+                        group.parent_groups.append(self.groups[parent.name])
+                # list with group's host objects:
+                for host in group.hosts[:]:
+                    if host != self.hosts[host.name]:
+                        group.hosts.remove(host)
+                        group.hosts.append(self.hosts[host.name])
+                    # also check here that the group that contains host, is
+                    # also contained in the host's group list
+                    if group not in self.hosts[host.name].groups:
+                        self.hosts[host.name].groups.append(group)
+
+        # extra checks on special groups all and ungrouped
+        # remove hosts from 'ungrouped' if they became member of other groups
+        if 'ungrouped' in self.groups:
+            ungrouped = self.groups['ungrouped']
+            # loop on a copy of ungrouped hosts, as we want to change that list
+            for host in ungrouped.hosts[:]:
+                if len(host.groups) > 1:
+                    host.groups.remove(ungrouped)
+                    ungrouped.hosts.remove(host)
+
+        # remove hosts from 'all' if they became member of other groups
+        # all should only contain direct children, not grandchildren
+        # direct children should have depth == 1
+        if 'all' in self.groups:
+            allgroup = self.groups['all']
+            # loop on a copy of all's child groups, as we want to change that list
+            for group in allgroup.child_groups[:]:
+                # groups might once have been added to all, and later be added
+                # to another group: we need to remove the link with all then
+                if len(group.parent_groups) > 1 and allgroup in group.parent_groups:
+                    # real children of all have just 1 parent, all
+                    # this one has more, so not a direct child of all anymore
+                    group.parent_groups.remove(allgroup)
+                    allgroup.child_groups.remove(group)
+                elif allgroup not in group.parent_groups:
+                    # this group was once added to all, but doesn't list it as
+                    # a parent any more; the info in the group is the correct
+                    # info
+                    allgroup.child_groups.remove(group)
+
+    def _add_group(self, group):
+        """ Merge an existing group or add a new one;
+            Track parent and child groups, and hosts of the new one """
+
+        if group.name not in self.groups:
+            # it's brand new, add him!
+            self.groups[group.name] = group
+        if self.groups[group.name] != group:
+            # different object, merge
+            self._merge_groups(self.groups[group.name], group)
+
+    def _add_host(self, host):
+        if host.name not in self.hosts:
+            # Papa's got a brand new host
+            self.hosts[host.name] = host
+        if self.hosts[host.name] != host:
+            # different object, merge
+            self._merge_hosts(self.hosts[host.name], host)
+
+    def _merge_groups(self, group, newgroup):
+        """ Merge all of instance newgroup into group,
+            update parent/child relationships
+            group lists may still contain group objects that exist in self with
+            same name, but was instantiated as a different object in some other
+            inventory parser; these are handled later """
+
+        # name
+        if group.name != newgroup.name:
+            raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
+
+        # depth
+        group.depth = max([group.depth, newgroup.depth])
+
+        # hosts list (host objects are by now already added to self.hosts)
+        for host in newgroup.hosts:
+            grouphosts = dict([(h.name, h) for h in group.hosts])
+            if host.name in grouphosts:
+                # same host name but different object, merge
+                self._merge_hosts(grouphosts[host.name], host)
+            else:
+                # new membership, add host to group from self
+                # group from self will also be added again to host.groups, but
+                # as different object
+                group.add_host(self.hosts[host.name])
+                # now remove the old group object from host.groups
+                for hostgroup in [g for g in host.groups]:
+                    if hostgroup.name == group.name and hostgroup != self.groups[group.name]:
+                        self.hosts[host.name].groups.remove(hostgroup)
+
+        # group child membership relation
+        for newchild in newgroup.child_groups:
+            # dict with existing child groups:
+            childgroups = dict([(g.name, g) for g in group.child_groups])
+            # check if child of new group is already known as a child
+            if newchild.name not in childgroups:
+                self.groups[group.name].add_child_group(newchild)
+
+        # group parent membership relation
+        for newparent in newgroup.parent_groups:
+            # dict with existing parent groups:
+            parentgroups = dict([(g.name, g) for g in group.parent_groups])
+            # check if parent of new group is already known as a parent
+            if newparent.name not in parentgroups:
+                if newparent.name not in self.groups:
+                    # group does not exist yet in self, import him
+                    self.groups[newparent.name] = newparent
+                # group now exists but not yet as a parent here
+                self.groups[newparent.name].add_child_group(group)
+
+        # variables
+        group.vars = combine_vars(group.vars, newgroup.vars)
+
+    def _merge_hosts(self, host, newhost):
+        """ Merge all of instance newhost into host """
+
+        # name
+        if host.name != newhost.name:
+            raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
+
+        # group membership relation
+        for newgroup in newhost.groups:
+            # dict with existing groups:
+            hostgroups = dict([(g.name, g) for g in host.groups])
+            # check if new group is already known as a group
+            if newgroup.name not in hostgroups:
+                if newgroup.name not in self.groups:
+                    # group does not exist yet in self, import him
+                    self.groups[newgroup.name] = newgroup
+                # group now exists but doesn't have host yet
+                self.groups[newgroup.name].add_host(host)
+
+        # variables
+        host.vars = combine_vars(host.vars, newhost.vars)
+
+    def get_host_variables(self, host):
+        """ Gets additional host variables from all inventories """
+        vars = {}
+        for i in self.parsers:
+            vars.update(i.get_host_variables(host))
+        return vars
+
diff --git a/v2/ansible/inventory/expand_hosts.py b/v2/ansible/inventory/expand_hosts.py
new file mode 100644
index 00000000000..f1297409355
--- /dev/null
+++ b/v2/ansible/inventory/expand_hosts.py
@@ -0,0 +1,116 @@
+# (c) 2012, Zettar Inc.
+# Written by Chin Fang
+#
+# This file is part of Ansible
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see .
+#
+
+'''
+This module is for enhancing ansible's inventory parsing capability such
+that it can deal with hostnames specified using a simple pattern in the
+form of [beg:end], example: [1:5], [a:c], [D:G]. If beg is not specified,
+it defaults to 0.
+
+If beg is given and is left-zero-padded, e.g. '001', it is taken as a
+formatting hint when the range is expanded. e.g. [001:010] is to be
+expanded into 001, 002 ...009, 010.
+
+Note that when beg is specified with left zero padding, then the length of
+end must be the same as that of beg, else an exception is raised.
+'''
+import string
+
+from ansible import errors
+
+def detect_range(line = None):
+    '''
+    A helper function that checks a given host line to see if it contains
+    a range pattern described in the docstring above.
+
+    Returns True if the given line contains a pattern, else False.
+    '''
+    if 0 <= line.find("[") < line.find(":") < line.find("]"):
+        return True
+    else:
+        return False
+
+def expand_hostname_range(line = None):
+    '''
+    A helper function that expands a given line that contains a pattern
+    specified in top docstring, and returns a list that consists of the
+    expanded version.
+
+    The '[' and ']' characters are used to maintain the pseudo-code
+    appearance. They are replaced in this function with '|' to ease
+    string splitting.
+
+    References: http://ansible.github.com/patterns.html#hosts-and-groups
+    '''
+    all_hosts = []
+    if line:
+        # A hostname such as db[1:6]-node is considered to consist of
+        # three parts:
+        # head: 'db'
+        # nrange: [1:6]; range() is a built-in. Can't use the name
+        # tail: '-node'
+
+        # Add support for multiple ranges in a host so:
+        # db[01:10:3]node-[01:10]
+        # - to do this we split off at the first [...] set, getting the list
+        #   of hosts and then repeat until none left.
+        # - also add an optional third parameter which contains the step. (Default: 1)
+        #   so range can be [01:10:2] -> 01 03 05 07 09
+        # FIXME: make this work for alphabetic sequences too.
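+        # (worked example) 'db[01:03]-node' splits into head='db',
+        # nrange='01:03', tail='-node'; the leading zero on '01' sets the
+        # fill width, giving ['db01-node', 'db02-node', 'db03-node'].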
+ + (head, nrange, tail) = line.replace('[','|',1).replace(']','|',1).split('|') + bounds = nrange.split(":") + if len(bounds) != 2 and len(bounds) != 3: + raise errors.AnsibleError("host range incorrectly specified") + beg = bounds[0] + end = bounds[1] + if len(bounds) == 2: + step = 1 + else: + step = bounds[2] + if not beg: + beg = "0" + if not end: + raise errors.AnsibleError("host range end value missing") + if beg[0] == '0' and len(beg) > 1: + rlen = len(beg) # range length formatting hint + if rlen != len(end): + raise errors.AnsibleError("host range format incorrectly specified!") + fill = lambda _: str(_).zfill(rlen) # range sequence + else: + fill = str + + try: + i_beg = string.ascii_letters.index(beg) + i_end = string.ascii_letters.index(end) + if i_beg > i_end: + raise errors.AnsibleError("host range format incorrectly specified!") + seq = string.ascii_letters[i_beg:i_end+1] + except ValueError: # not an alpha range + seq = range(int(beg), int(end)+1, int(step)) + + for rseq in seq: + hname = ''.join((head, fill(rseq), tail)) + + if detect_range(hname): + all_hosts.extend( expand_hostname_range( hname ) ) + else: + all_hosts.append(hname) + + return all_hosts diff --git a/v2/ansible/inventory/group.py b/v2/ansible/inventory/group.py new file mode 100644 index 00000000000..87d6f64dfc6 --- /dev/null +++ b/v2/ansible/inventory/group.py @@ -0,0 +1,159 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
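As a quick orientation for the Group class that follows (and the Host class in the next file), here is a small usage sketch; it assumes the v2 package imports resolve as laid out in this patch:

    from ansible.inventory.group import Group
    from ansible.inventory.host import Host

    all_group = Group('all')
    web = Group('webservers')
    all_group.add_child_group(web)     # sets web.depth to 1 and links parents

    host = Host('web1.example.com')
    web.add_host(host)                 # also records the group on the host

    # get_hosts() walks child groups first and de-duplicates
    print([h.name for h in all_group.get_hosts()])   # ['web1.example.com']
    print(web.depth)                                  # 1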
+ +from ansible.utils.debug import debug + +class Group: + ''' a group of ansible hosts ''' + + #__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ] + + def __init__(self, name=None): + + self.depth = 0 + self.name = name + self.hosts = [] + self.vars = {} + self.child_groups = [] + self.parent_groups = [] + self._hosts_cache = None + + #self.clear_hosts_cache() + #if self.name is None: + # raise Exception("group name is required") + + def __repr__(self): + return self.get_name() + + def __getstate__(self): + return self.serialize() + + def __setstate__(self, data): + return self.deserialize(data) + + def serialize(self): + parent_groups = [] + for parent in self.parent_groups: + parent_groups.append(parent.serialize()) + + result = dict( + name=self.name, + vars=self.vars.copy(), + parent_groups=parent_groups, + depth=self.depth, + ) + + debug("serializing group, result is: %s" % result) + return result + + def deserialize(self, data): + debug("deserializing group, data is: %s" % data) + self.__init__() + self.name = data.get('name') + self.vars = data.get('vars', dict()) + + parent_groups = data.get('parent_groups', []) + for parent_data in parent_groups: + g = Group() + g.deserialize(parent_data) + self.parent_groups.append(g) + + def get_name(self): + return self.name + + def add_child_group(self, group): + + if self == group: + raise Exception("can't add group to itself") + + # don't add if it's already there + if not group in self.child_groups: + self.child_groups.append(group) + + # update the depth of the child + group.depth = max([self.depth+1, group.depth]) + + # update the depth of the grandchildren + group._check_children_depth() + + # now add self to child's parent_groups list, but only if there + # isn't already a group with the same name + if not self.name in [g.name for g in group.parent_groups]: + group.parent_groups.append(self) + + self.clear_hosts_cache() + + def _check_children_depth(self): + + for group in self.child_groups: + group.depth = max([self.depth+1, group.depth]) + group._check_children_depth() + + def add_host(self, host): + + self.hosts.append(host) + host.add_group(self) + self.clear_hosts_cache() + + def set_variable(self, key, value): + + self.vars[key] = value + + def clear_hosts_cache(self): + + self._hosts_cache = None + for g in self.parent_groups: + g.clear_hosts_cache() + + def get_hosts(self): + + if self._hosts_cache is None: + self._hosts_cache = self._get_hosts() + + return self._hosts_cache + + def _get_hosts(self): + + hosts = [] + seen = {} + for kid in self.child_groups: + kid_hosts = kid.get_hosts() + for kk in kid_hosts: + if kk not in seen: + seen[kk] = 1 + hosts.append(kk) + for mine in self.hosts: + if mine not in seen: + seen[mine] = 1 + hosts.append(mine) + return hosts + + def get_vars(self): + return self.vars.copy() + + def _get_ancestors(self): + + results = {} + for g in self.parent_groups: + results[g.name] = g + results.update(g._get_ancestors()) + return results + + def get_ancestors(self): + + return self._get_ancestors().values() + diff --git a/v2/ansible/inventory/host.py b/v2/ansible/inventory/host.py new file mode 100644 index 00000000000..414ec34b96e --- /dev/null +++ b/v2/ansible/inventory/host.py @@ -0,0 +1,127 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of 
diff --git a/v2/ansible/inventory/host.py b/v2/ansible/inventory/host.py
new file mode 100644
index 00000000000..414ec34b96e
--- /dev/null
+++ b/v2/ansible/inventory/host.py
@@ -0,0 +1,127 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible.inventory.group import Group
+from ansible.utils.vars import combine_vars
+
+__all__ = ['Host']
+
+class Host:
+    ''' a single ansible host '''
+
+    #__slots__ = [ 'name', 'vars', 'groups' ]
+
+    def __getstate__(self):
+        return self.serialize()
+
+    def __setstate__(self, data):
+        return self.deserialize(data)
+
+    def serialize(self):
+        groups = []
+        for group in self.groups:
+            groups.append(group.serialize())
+
+        return dict(
+            name=self.name,
+            vars=self.vars.copy(),
+            ipv4_address=self.ipv4_address,
+            ipv6_address=self.ipv6_address,
+            port=self.port,
+            gathered_facts=self._gathered_facts,
+            groups=groups,
+        )
+
+    def deserialize(self, data):
+        self.__init__()
+
+        self.name = data.get('name')
+        self.vars = data.get('vars', dict())
+        self.ipv4_address = data.get('ipv4_address', '')
+        self.ipv6_address = data.get('ipv6_address', '')
+        self.port = data.get('port')
+        # restore the fact-gathering flag that serialize() recorded
+        self._gathered_facts = data.get('gathered_facts', False)
+
+        groups = data.get('groups', [])
+        for group_data in groups:
+            g = Group()
+            g.deserialize(group_data)
+            self.groups.append(g)
+
+    def __init__(self, name=None, port=None):
+
+        self.name = name
+        self.vars = {}
+        self.groups = []
+
+        self.ipv4_address = name
+        self.ipv6_address = name
+
+        if port and port != C.DEFAULT_REMOTE_PORT:
+            self.port = int(port)
+        else:
+            self.port = C.DEFAULT_REMOTE_PORT
+
+        self._gathered_facts = False
+
+    def __repr__(self):
+        return self.get_name()
+
+    def get_name(self):
+        return self.name
+
+    @property
+    def gathered_facts(self):
+        return self._gathered_facts
+
+    def set_gathered_facts(self, gathered):
+        self._gathered_facts = gathered
+
+    def add_group(self, group):
+
+        self.groups.append(group)
+
+    def set_variable(self, key, value):
+
+        self.vars[key] = value
+
+    def get_groups(self):
+
+        groups = {}
+        for g in self.groups:
+            groups[g.name] = g
+            ancestors = g.get_ancestors()
+            for a in ancestors:
+                groups[a.name] = a
+        return groups.values()
+
+    def get_vars(self):
+
+        results = {}
+        groups = self.get_groups()
+        for group in sorted(groups, key=lambda g: g.depth):
+            results = combine_vars(results, group.get_vars())
+        results = combine_vars(results, self.vars)
+        results['inventory_hostname'] = self.name
+        results['inventory_hostname_short'] = self.name.split('.')[0]
+        results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
+        return results
+
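A sketch of the precedence get_vars() produces: shallower groups merge first, deeper groups override them, and host-level vars override all groups (hypothetical names):

    all = Group(name='all')
    web = Group(name='webservers')
    all.add_child_group(web)

    h = Host(name='web1.example.com')
    web.add_host(h)

    all.set_variable('http_port', 80)
    web.set_variable('http_port', 8080)   # depth 1 beats depth 0
    h.set_variable('max_clients', 200)    # host vars beat any group

    v = h.get_vars()
    # v['http_port'] == 8080
    # v['max_clients'] == 200
    # v['inventory_hostname_short'] == 'web1'
    # v['group_names'] == ['webservers']   ('all' is excluded)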
diff --git a/v2/ansible/inventory/ini.py b/v2/ansible/inventory/ini.py
new file mode 100644
index 00000000000..075701c056c
--- /dev/null
+++ b/v2/ansible/inventory/ini.py
@@ -0,0 +1,215 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+
+import ast
+import shlex
+import re
+
+from ansible import constants as C
+from ansible.errors import *
+from ansible.inventory.host import Host
+from ansible.inventory.group import Group
+from ansible.inventory.expand_hosts import detect_range
+from ansible.inventory.expand_hosts import expand_hostname_range
+
+class InventoryParser(object):
+    """
+    Host inventory parser for ansible, using INI-format files.
+    """
+
+    def __init__(self, filename=C.DEFAULT_HOST_LIST):
+        self.filename = filename
+        with open(filename) as fh:
+            self.lines = fh.readlines()
+        self.groups = {}
+        self.hosts = {}
+        self._parse()
+
+    def _parse(self):
+
+        self._parse_base_groups()
+        self._parse_group_children()
+        self._add_allgroup_children()
+        self._parse_group_variables()
+        return self.groups
+
+    @staticmethod
+    def _parse_value(v):
+        if "#" not in v:
+            try:
+                return ast.literal_eval(v)
+            # Using explicit exceptions.
+            # Likely a string that literal_eval does not like.  We will then
+            # just keep it as a string.
+            except ValueError:
+                # For some reason this was thought to be malformed.
+                pass
+            except SyntaxError:
+                # Is this a hash with an equals at the end?
+                pass
+        return v
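+    # A few hypothetical examples of what _parse_value() returns:
+    #   _parse_value("8080")      -> 8080        (int via literal_eval)
+    #   _parse_value("True")      -> True        (bool via literal_eval)
+    #   _parse_value("web1.lan")  -> "web1.lan"  (literal_eval rejects it; kept as a string)
+    #   _parse_value("a#b")       -> "a#b"       (contains '#', never evaluated)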
+
+    # [webservers]
+    # alpha
+    # beta:2345
+    # gamma sudo=True user=root
+    # delta asdf=jkl favcolor=red
+
+    def _add_allgroup_children(self):
+
+        for group in self.groups.values():
+            if group.depth == 0 and group.name != 'all':
+                self.groups['all'].add_child_group(group)
+
+    def _parse_base_groups(self):
+        # FIXME: refactor
+
+        ungrouped = Group(name='ungrouped')
+        all = Group(name='all')
+        all.add_child_group(ungrouped)
+
+        self.groups = dict(all=all, ungrouped=ungrouped)
+        active_group_name = 'ungrouped'
+
+        for line in self.lines:
+            line = self._before_comment(line).strip()
+            if line.startswith("[") and line.endswith("]"):
+                active_group_name = line.replace("[","").replace("]","")
+                if ":vars" in line or ":children" in line:
+                    active_group_name = active_group_name.rsplit(":", 1)[0]
+                    if active_group_name not in self.groups:
+                        new_group = self.groups[active_group_name] = Group(name=active_group_name)
+                    active_group_name = None
+                elif active_group_name not in self.groups:
+                    new_group = self.groups[active_group_name] = Group(name=active_group_name)
+            elif line.startswith(";") or line == '':
+                pass
+            elif active_group_name:
+                tokens = shlex.split(line)
+                if len(tokens) == 0:
+                    continue
+                hostname = tokens[0]
+                port = C.DEFAULT_REMOTE_PORT
+                # Two cases to check:
+                # 0. A hostname that contains a range pseudo-code and a port
+                # 1. A hostname that contains just a port
+                if hostname.count(":") > 1:
+                    # Possibly an IPv6 address, or maybe a host line with multiple ranges
+                    # IPv6 with Port  XXX:XXX::XXX.port
+                    # FQDN            foo.example.com
+                    if hostname.count(".") == 1:
+                        (hostname, port) = hostname.rsplit(".", 1)
+                elif ("[" in hostname and
+                    "]" in hostname and
+                    ":" in hostname and
+                    (hostname.rindex("]") < hostname.rindex(":")) or
+                    ("]" not in hostname and ":" in hostname)):
+                    (hostname, port) = hostname.rsplit(":", 1)
+
+                hostnames = []
+                if detect_range(hostname):
+                    hostnames = expand_hostname_range(hostname)
+                else:
+                    hostnames = [hostname]
+
+                for hn in hostnames:
+                    host = None
+                    if hn in self.hosts:
+                        host = self.hosts[hn]
+                    else:
+                        host = Host(name=hn, port=port)
+                        self.hosts[hn] = host
+                    if len(tokens) > 1:
+                        for t in tokens[1:]:
+                            if t.startswith('#'):
+                                break
+                            try:
+                                (k,v) = t.split("=", 1)
+                            except ValueError, e:
+                                raise AnsibleError("Invalid ini entry in %s: %s - %s" % (self.filename, t, str(e)))
+                            if k == 'ansible_ssh_host':
+                                host.ipv4_address = self._parse_value(v)
+                            else:
+                                host.set_variable(k, self._parse_value(v))
+                    self.groups[active_group_name].add_host(host)
+
+    # [southeast:children]
+    # atlanta
+    # raleigh
+
+    def _parse_group_children(self):
+        group = None
+
+        for line in self.lines:
+            line = line.strip()
+            if not line:
+                continue
+            if line.startswith("[") and ":children]" in line:
+                line = line.replace("[","").replace(":children]","")
+                group = self.groups.get(line, None)
+                if group is None:
+                    group = self.groups[line] = Group(name=line)
+            elif line.startswith("#") or line.startswith(";"):
+                pass
+            elif line.startswith("["):
+                group = None
+            elif group:
+                kid_group = self.groups.get(line, None)
+                if kid_group is None:
+                    raise AnsibleError("child group is not defined: (%s)" % line)
+                else:
+                    group.add_child_group(kid_group)
+
+    # [webservers:vars]
+    # http_port=1234
+    # maxRequestsPerChild=200
+
+    def _parse_group_variables(self):
+        group = None
+        for line in self.lines:
+            line = line.strip()
+            if line.startswith("[") and ":vars]" in line:
+                line = line.replace("[","").replace(":vars]","")
+                group = self.groups.get(line, None)
+                if group is None:
+                    raise AnsibleError("can't add vars to undefined group: %s" % line)
+            elif line.startswith("#") or line.startswith(";"):
+                pass
+            elif line.startswith("["):
+                group = None
+            elif line == '':
+                pass
+            elif group:
+                if "=" not in line:
+                    raise AnsibleError("variables assigned to group must be in key=value form")
+                else:
+                    (k, v) = [e.strip() for e in line.split("=", 1)]
+                    group.set_variable(k, self._parse_value(v))
+
+    def get_host_variables(self, host):
+        return {}
+
+    def _before_comment(self, msg):
+        ''' what's the part of a string before a comment? '''
+        msg = msg.replace("\#","**NOT_A_COMMENT**")
+        msg = msg.split("#")[0]
+        msg = msg.replace("**NOT_A_COMMENT**","#")
+        return msg
+
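An end-to-end sketch of the INI dialect this parser accepts, using a hypothetical hosts.ini that exercises ranges, ports, children, and group vars:

    # hosts.ini (hypothetical):
    #
    #   [webservers]
    #   web[01:02].example.com http_port=8080
    #   db1.example.com:2222
    #
    #   [southeast:children]
    #   webservers
    #
    #   [webservers:vars]
    #   max_clients=200

    from ansible.inventory.ini import InventoryParser

    parser = InventoryParser(filename='hosts.ini')
    web = parser.groups['webservers']
    print([h.name for h in web.get_hosts()])
    # ['web01.example.com', 'web02.example.com', 'db1.example.com']
    print(web.get_vars())                          # {'max_clients': 200}
    print(parser.hosts['db1.example.com'].port)    # 2222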
diff --git a/v2/ansible/inventory/script.py b/v2/ansible/inventory/script.py
new file mode 100644
index 00000000000..13b53a24f5e
--- /dev/null
+++ b/v2/ansible/inventory/script.py
@@ -0,0 +1,156 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+
+import os
+import subprocess
+import sys
+
+from ansible import constants as C
+from ansible.errors import *
+from ansible.inventory.host import Host
+from ansible.inventory.group import Group
+from ansible.module_utils.basic import json_dict_bytes_to_unicode
+
+
+class InventoryScript:
+    ''' Host inventory parser for ansible using external inventory scripts. '''
+
+    def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
+
+        self._loader = loader
+
+        # Support inventory scripts that are not prefixed with some
+        # path information but happen to be in the current working
+        # directory when '.' is not in PATH.
+        self.filename = os.path.abspath(filename)
+        cmd = [ self.filename, "--list" ]
+        try:
+            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        except OSError, e:
+            raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
+        (stdout, stderr) = sp.communicate()
+
+        if sp.returncode != 0:
+            raise AnsibleError("Inventory script (%s) had an execution error: %s" % (filename, stderr))
+
+        self.data = stdout
+        # see comment about _meta below
+        self.host_vars_from_top = None
+        self.groups = self._parse(stderr)
+
+    def _parse(self, err):
+
+        all_hosts = {}
+
+        # not passing from_remote because data from CMDB is trusted
+        self.raw = self._loader.load(self.data)
+        self.raw = json_dict_bytes_to_unicode(self.raw)
+
+        all = Group('all')
+        groups = dict(all=all)
+        group = None
+
+        if 'failed' in self.raw:
+            sys.stderr.write(err + "\n")
+            raise AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
+
+        for (group_name, data) in self.raw.items():
+
+            # in Ansible 1.3 and later, a "_meta" subelement may contain
+            # a variable "hostvars" which contains a hash for each host.
+            # If this "hostvars" exists at all, then do not call --host for
+            # each host.  This is for efficiency, and scripts should still
+            # return data if called with --host for backwards compat with
+            # 1.2 and earlier.
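+            # For example, a script's --list output might look like this
+            # (hypothetical names; the "_meta" block is optional):
+            #
+            #   {
+            #       "webservers": {
+            #           "hosts": ["web1.example.com", "web2.example.com"],
+            #           "vars": {"http_port": 8080},
+            #           "children": ["atlanta"]
+            #       },
+            #       "atlanta": ["db1.example.com"],
+            #       "_meta": {
+            #           "hostvars": {"web1.example.com": {"max_clients": 200}}
+            #       }
+            #   }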
+            if group_name == '_meta':
+                if 'hostvars' in data:
+                    self.host_vars_from_top = data['hostvars']
+                    continue
+
+            if group_name != all.name:
+                group = groups[group_name] = Group(group_name)
+            else:
+                group = all
+            host = None
+
+            if not isinstance(data, dict):
+                data = {'hosts': data}
+            # if the group data has neither of those subkeys, it is the
+            # simplified syntax: a single host (named by the key) with vars
+            elif not any(k in data for k in ('hosts','vars')):
+                data = {'hosts': [group_name], 'vars': data}
+
+            if 'hosts' in data:
+                if not isinstance(data['hosts'], list):
+                    raise AnsibleError("You defined a group \"%s\" with bad "
+                        "data for the host list:\n %s" % (group_name, data))
+
+                for hostname in data['hosts']:
+                    if hostname not in all_hosts:
+                        all_hosts[hostname] = Host(hostname)
+                    host = all_hosts[hostname]
+                    group.add_host(host)
+
+            if 'vars' in data:
+                if not isinstance(data['vars'], dict):
+                    raise AnsibleError("You defined a group \"%s\" with bad "
+                        "data for variables:\n %s" % (group_name, data))
+
+                for k, v in data['vars'].iteritems():
+                    if group.name == all.name:
+                        all.set_variable(k, v)
+                    else:
+                        group.set_variable(k, v)
+
+        # Separate loop to ensure all groups are defined before children are linked
+        for (group_name, data) in self.raw.items():
+            if group_name == '_meta':
+                continue
+            if isinstance(data, dict) and 'children' in data:
+                for child_name in data['children']:
+                    if child_name in groups:
+                        groups[group_name].add_child_group(groups[child_name])
+
+        for group in groups.values():
+            if group.depth == 0 and group.name != 'all':
+                all.add_child_group(group)
+
+        return groups
+
+    def get_host_variables(self, host):
+        """ Runs