Compare commits

...

454 commits

Author SHA1 Message Date
James Cammarata 7baf994f2c New release v2.1.6.0-1 2017-06-01 13:24:23 -05:00
James Cammarata e06e9b40b7 Updating CHANGELOG for 2.1.6 final release 2017-06-01 13:21:14 -05:00
James Cammarata a54a4fb78a New release v2.1.6.0-0.1.rc1 2017-05-09 08:54:21 -05:00
James Cammarata fd30f53289 Fixing security issue with lookup returns not tainting the jinja2 environment
CVE-2017-7481

Lookup returns wrap the result in unsafe; however, when used through the
standard templar engine, this does not result in the jinja2 environment being
marked as unsafe as a whole. This means the lookup result loses the unsafe
protection and may become a simple unicode string, which can result in bad
things being re-templated.

This also adds a global lookup param and cfg options for lookups to allow
unsafe returns, so users can force the previous (insecure) behavior.

(cherry picked from commit 72dfb1570d22ac519350a8c09e76c458789120ed)
(cherry picked from commit fadccda7c7a2e8d0650f4dee8e3cea93cf17acfd)
2017-05-08 15:59:55 -05:00
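A minimal sketch of the re-templating risk described above, using plain jinja2 rather than Ansible's templar; the template string and variable name are illustrative:

```python
from jinja2 import Environment

env = Environment()

# A lookup result that happens to contain template syntax.
lookup_result = "{{ secret_var }}"

# If the result loses its unsafe marking and is templated a second time,
# the embedded expression gets evaluated -- the core risk behind CVE-2017-7481.
print(env.from_string(lookup_result).render(secret_var="leaked"))  # -> leaked
```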
Nick Piper d68a911141 Minor typo correction varibles -> variables
No impact as variable wasn't used.
(cherry picked from commit 403c142750)
2017-05-01 08:45:24 -04:00
Matt Clay 813497bbda Use Shippable image: drydock/u16pytall:master (#24003)
* Use Shippable image: drydock/u16pytall:master
* Disable postgresql test needing privileged mode.
2017-04-26 17:02:02 +08:00
Brian Coca 0c692484e6 fix hashing when path is symlink
(cherry picked from commit 631a10745d)
2017-04-20 10:36:18 -04:00
Brian Coca 3e9d4607ce tolerate 'batch' systems that mess with stdin (#23596)
* tolerate 'batch' systems that mess with stdin

fixes #23541

* have pause on windows tolerate devnull

* tuplie

(cherry picked from commit 586fcae398)
2017-04-18 11:42:36 -04:00
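A rough sketch of what tolerating a mangled stdin can look like, assuming the failure mode is fileno() raising instead of returning; this is illustrative, not the actual patch:

```python
import sys

def stdin_fileno():
    # Batch schedulers may close or replace stdin entirely; treat anything
    # that raises as "no usable stdin" instead of crashing.
    try:
        return sys.stdin.fileno()
    except (AttributeError, ValueError, OSError):
        return None
```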
James Cammarata 39ce8c6610 New release v2.1.5.0-1 2017-03-27 13:46:04 -05:00
Brian Coca bd8f81b13b catch bad extra vars data earlier
Bad extra early (#22322)

(cherry picked from commit c71b15a696)
2017-03-07 13:48:17 -05:00
Matt Davis f5dbb555fa refresh azure_rm.py inventory from devel 2017-03-06 15:53:29 -08:00
James Cammarata 1805f6b6c0 New release v2.1.5.0-0.2.rc2 2017-03-03 16:39:52 -06:00
Matt Davis 8f5212befe fix azure_rm version checks (#22270)
* Use packaging.version.Version instead of LooseVersion for better support of prerelease tags (eg, 0.30.0 > 0.30.0rc6)
* Add explicit check/error for msrestazure package
(cherry picked from commit d12c93ef2b)
2017-03-03 13:29:37 -08:00
James Cammarata b25f0f4e00 New release v2.1.5.0-0.1.rc1 2017-02-21 18:08:12 -06:00
James Cammarata 109010c078 Additional lock down of conditionals
(cherry picked from commit 9751bf440e2b026874e70f950920e6dbee2e9115)
2017-02-21 17:45:47 -06:00
James Cammarata cdd5ed5f3d Rework how the Conditional class deals with undefined vars
Previously, the Conditional class did a simple check when an
AnsibleUndefinedVariable error was raised to see if certain strings were
present. This patch tries to be smarter by evaluating the variable contained
in the error string and compared to the defined/not defined conditionals in
the conditional string.

This also modifies the UndefinedError message from HostVars slightly to
match the format returned by jinja2 in general, making it easier to match the
error message in the Conditional code.

Fixes #18514

(cherry picked from commit 81aa12eb1b)
(cherry picked from commit cfd57fcae2)
2017-02-21 17:44:01 -06:00
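One way to extract the offending variable from the error text, assuming jinja2's usual "'foo' is undefined" message shape; a sketch, not the Conditional code itself:

```python
import re

def undefined_var_in(msg):
    # jinja2's UndefinedError text typically looks like "'foo' is undefined";
    # pull the name out so it can be compared against the "is defined" /
    # "is not defined" tests in the conditional string.
    m = re.search(r"'(?P<var>[^']+)' is undefined", msg)
    return m.group("var") if m else None

print(undefined_var_in("'my_var' is undefined"))  # -> my_var
```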
James Cammarata f40e1bc289 Use proper YAML constructor class for safe loading
(cherry picked from commit 9f0b354023)
2017-02-21 16:25:25 -06:00
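The general PyYAML pattern behind this fix, sketched with a hypothetical loader class: register custom constructors on a SafeLoader subclass so they never land on the unsafe yaml.Loader:

```python
import yaml

class ExampleSafeLoader(yaml.SafeLoader):
    """Custom constructors go on this subclass, not on yaml.Loader."""

ExampleSafeLoader.add_constructor(
    'tag:yaml.org,2002:map',
    lambda loader, node: loader.construct_mapping(node),
)

print(yaml.load("a: 1", Loader=ExampleSafeLoader))  # -> {'a': 1}
```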
James Cammarata ad7c55dd9d Also clean template data even if marked unsafe
Fixes #20568

(cherry picked from commit 86beb55a90)
2017-02-10 10:12:14 -06:00
Brian Coca 68dbed2a5e removed warn, which is added in 2.3 2017-02-10 08:30:33 -05:00
Brian Coca 20d67cc562 use regex vs list to weed out password fields
- also warn as module SHOULD have no_log
- make password regex exportable for testing
 - avoids boolean fields

(cherry picked from commit 403e9d35df)
2017-02-09 18:20:10 -05:00
Matt Davis b4b65c6bd2 bump submodule refs 2017-02-09 11:32:50 -08:00
Brian Coca 2546e8b4e5 add url_password to 'cleanse' list
(cherry picked from commit 2f1ab29855)
2017-02-09 08:54:06 -05:00
Matt Clay 36d8612ac0 Use jinja2 import instead of pip to get version.
This resolves issues with older versions of pip.

(cherry picked from commit a8fb6f0958)
2017-01-19 12:22:01 -08:00
Matt Clay 799b180e2d Only test map on jinja2 >= 2.7
(cherry picked from commit ad65274643)
2017-01-19 12:18:52 -08:00
James Cammarata 44026f8d7b Add representer to AnsibleDumper for AnsibleUnsafeText
Fixes #20253
Fixes #20290
2017-01-19 10:58:49 -06:00
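The PyYAML mechanism involved, sketched with stand-in classes rather than AnsibleUnsafeText/AnsibleDumper: without a registered representer, a SafeDumper refuses to serialize a str subclass:

```python
import yaml

class UnsafeText(str):
    """Stand-in for AnsibleUnsafeText: a str subclass SafeDumper rejects."""

class ExampleDumper(yaml.SafeDumper):
    pass

# Teach the dumper to serialize the subclass as an ordinary string.
ExampleDumper.add_representer(
    UnsafeText,
    lambda dumper, data: dumper.represent_str(str(data)),
)

print(yaml.dump({"v": UnsafeText("hello")}, Dumper=ExampleDumper))
```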
Toshio Kuratomi 978311bf3f Remove jinja2 version requirement in setup.py as we hope to have the next release working with jinja2-2.9. 2017-01-16 11:25:16 -08:00
James Cammarata 59407cf2cb Fix for bug in Conditional for older jinja2 versions
Fixes #20309

(cherry picked from commit af96cba7e1)
2017-01-16 13:17:25 -06:00
James Cammarata 2c832c736f New release v2.1.4.0-1 2017-01-16 10:20:00 -06:00
James Cammarata 71c8ab6ce8 Updating CHANGELOG 2017-01-16 09:58:29 -06:00
James Cammarata 8bbb2b3691 New release v2.1.4.0-0.3.rc3 2017-01-13 16:48:24 -06:00
James Cammarata 400a3b984e Additional security fixes for CVE-2016-9587
(cherry picked from commit b7cdc21aee)
2017-01-13 16:25:20 -06:00
Toshio Kuratomi 6c6570583f Add jinja2 version constraint.
We're not yet compatible with jinja2-2.9, so help people using pip to
install get a working installation.

(cherry picked from commit 06ed25e788)
2017-01-12 11:16:12 -08:00
James Cammarata b5daf2286c New release v2.1.4.0-0.2.rc2 2017-01-11 16:28:32 -06:00
James Cammarata 7ec84394f9 Partial revert of 76f7ce55
(cherry picked from commit a94a48f85f)
2017-01-11 15:57:41 -06:00
James Cammarata 35480106e5 Additional fixes for security related to CVE-2016-9587
(cherry picked from commit d316068831)
2017-01-11 15:57:38 -06:00
Computest 51559b0a51 Fixing another corner case for security related to CVE-2016-9587
(cherry picked from commit bcceada5d9)
2017-01-11 15:54:42 -06:00
Matt Clay bd1ba1e21a Update fedora images for testing. 2017-01-09 16:04:30 -08:00
Matt Clay 596765c605 Fix group_by test to work with jinja2 >= 2.9.
(cherry picked from commit cc3d131f50)
2017-01-09 15:28:49 -08:00
James Cammarata d69f096f3e New release v2.1.4.0-0.1.rc1 2017-01-09 10:52:21 -06:00
James Cammarata d7dd41146a Fixing security bugs CVE-2016-9587
(cherry picked from commit c8f8d0607c5c123522951835603ccb7948e663d5)
2017-01-09 10:44:02 -06:00
James Cammarata f7ff28336d Correctly set loader path when using list tags/tasks
Fixes #19398

(cherry picked from commit b688f11474)
2016-12-21 21:05:10 -06:00
James Cammarata 20df246504 Wrap unhandled errors from lookups in an AnsibleError
This provides better error handling, and prevents errors like KeyError
from bubbling up to code in odd places.

Fixes #17482

(cherry picked from commit 85bbce9d6b)
2016-12-21 11:49:46 -06:00
René Moser f821d3a9f6 cloudstack: utils: fail friendlier if no zones available (#19332)
(cherry picked from commit 18b7852940)
2016-12-14 16:39:19 +01:00
James Cammarata a8421e2424 Updating core submodules for mysql fix 2016-11-29 13:24:59 -06:00
Michael Schuett 75cdac0d19 Fix auth in collins.py inventory
This forces basic auth to be used. Using the normal HTTPPasswordMgrWithDefaultRealm
password manager from urllib2 fails, since collins doesn't send a 401 retry on failure.
More about this can be seen here: http://stackoverflow.com/questions/2407126/python-urllib2-basic-auth-problem.
I added a small comment about the format of the host so others don't waste time like I did.

(cherry picked from commit 21813ed83e)
2016-11-23 14:39:50 -05:00
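The preemptive Basic-auth trick described above, sketched against the modern urllib (the original used Python 2's urllib2); URL and credentials are made up:

```python
import base64
import urllib.request

# collins never answers with a 401 challenge, so a reactive password manager
# (HTTPPasswordMgrWithDefaultRealm) is never triggered; send the credentials
# up front instead.
req = urllib.request.Request("https://collins.example.com/api/assets")
token = base64.b64encode(b"user:secret").decode("ascii")
req.add_header("Authorization", "Basic %s" % token)
# urllib.request.urlopen(req) now authenticates on the very first request.
```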
James Cammarata ff0b525608 Revert "Moved the _inventory.clear_group_dict_cache() from creating a group w… (#17766)"
This reverts commit a36ac1ad1b.
2016-11-18 14:28:59 -06:00
jamessewell a36ac1ad1b Moved the _inventory.clear_group_dict_cache() from creating a group w… (#17766)
* Moved the _inventory.clear_group_dict_cache() from creating a group which doesn't exist, to adding members to the group.

* Update __init__.py

Update to use changed: block to catch all changes for cache clear as suggested

(cherry picked from commit b91d4d884d)
2016-11-18 13:28:27 -06:00
Brian Coca d6ab369916 fix for filter fix
(cherry picked from commit 5d043b65d3)
2016-11-17 13:42:51 -05:00
Brian Coca 78e47b4bbd remove rsync path from returned facts
(cherry picked from commit 7c960d440f)
2016-11-17 13:21:26 -05:00
James Cammarata c3fbe526ca Catch loop eval errors and only raise them again if the task is not skipped
This should help on issues like #16222.

(cherry picked from commit 57cf5e431c)
2016-11-15 10:40:50 -06:00
Brian Coca eea3051a12 readded var incorrectly removed via merge 2016-11-04 17:22:51 -04:00
Brian Coca f99060b3b9 resolve inventory path on init
This allows meta refresh_inventory to work with relative paths
Added option to unfrackpath to not resolve symlinks
fixes #16857

(cherry picked from commit 8217c1c39c)
2016-11-04 17:22:51 -04:00
nyasukun 00c48637b0 fixed MemoryError when copying huge file (#16392)
* fixed

* support both python 2 and 3

(cherry picked from commit adea1f2b80)
2016-11-04 12:58:21 -07:00
James Cammarata 0f6b318b29 Belated updating of CHANGELOG for 2.1.3 2016-11-04 14:26:22 -05:00
James Cammarata 4cadc98049 New release v2.1.3.0-1 2016-11-04 11:51:44 -05:00
Toshio Kuratomi 394dd2be04 Revert "fix iteritems for python 3"
This reverts commit 0bb04e132a.

This was supposed to land on stable-2.2, not stable-2.1
2016-11-01 12:46:03 -07:00
Daniel Menet 0bb04e132a fix iteritems for python 3
(cherry picked from commit 19fdb58948)
2016-11-01 09:38:22 -07:00
James Cammarata b056a5823f New release v2.1.3.0-0.3.rc3 2016-10-25 10:30:52 -05:00
Matt Davis b5c95ea6fa backport various docker_common fixes from devel 2016-10-25 08:20:35 -07:00
Matt Davis 12a38bc75f fix version check to support >=rc5
(cherry picked from commit d1e1898b0e)
2016-10-24 21:23:05 -07:00
Thomas Quinot c03bdbfad0 Filter out internal magic and connection variables from facts returns
Fixes #15925

(cherry picked from commit 236c923c25)
2016-10-24 23:09:28 -05:00
James Cammarata e2926bd398 New release v2.1.3.0-0.2.rc2 2016-10-24 18:46:43 -05:00
Toshio Kuratomi 72cd60a1cd Update submodule refs 2016-10-24 15:50:40 -07:00
Toshio Kuratomi 14e021496d Add changelog for apt_key change 2016-10-24 15:50:07 -07:00
Matt Clay aadf342b46 Switch test_lookups to badssl/local testing. (#16466) (#18144)
* Switch test_lookups to badssl/local testing.
* Use var for checking badssl host content.

(cherry picked from commit 394430a61e)
2016-10-21 21:40:25 -07:00
John R Barker 60a2da09de Backport validate-modules to stable-2.1 so we can have versioned testing (#18120)
See #18001 for more details
2016-10-20 18:22:47 +01:00
James Cammarata e00dc01803 New release v2.1.3.0-0.1.rc1 2016-10-18 15:44:28 -05:00
Toshio Kuratomi 28016bf1a0 Add entry for using no_log with password in subversion 2016-10-17 12:44:21 -07:00
Toshio Kuratomi 768c1e2e58 Update core submodule to pull in fix for subversion and no_log 2016-10-17 12:44:01 -07:00
Toshio Kuratomi a18e2976c6 Update extras submodule ref to pull in postgresql_lang and postgresql_ext password hiding fix 2016-10-17 08:11:02 -07:00
Bill Nottingham 3f7ac72427 Change <support@ansible.com> - it's being retired. 2016-10-15 16:47:15 -07:00
Toshio Kuratomi 4ea3cd3a38 Fix paramiko for non-ascii become password
Cherry-picked from a part of f24c10c32b
2016-10-15 16:29:54 -07:00
Toshio Kuratomi 264b33da6b Fix become password using non-ascii for local connection
Fixes #18029

(cherry picked from commit efc5dac52c)

In 2.1, the to_bytes function doesn't have a surrogate_or_strict error handler
2016-10-15 11:29:51 -07:00
Bruno Rocha 3e58f08155 Fix unbound method call for JSONEncoder (#17970)
* Fix unbound method call for JSONEncoder

As currently written, it will lead to an unbound method error

```python
In [1]: import json

In [2]: json.JSONEncoder.default('object_here')
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-2-872fdacfda50> in <module>()
----> 1 json.JSONEncoder.default('object_here')

TypeError: unbound method default() must be called with JSONEncoder instance as first argument (got str instance instead)

```

But what is really wanted is to let the json module raise the "is not serializable" error, which demands a bound instance of `JSONEncoder()`

```python
In [3]: json.JSONEncoder().default('object_here')
---------------------------------------------------------------------------
TypeError: 'object_here' is not JSON serializable

```

BTW: I think it should try to call `.to_json` on the object before raising, as it is a common pattern.

* Calling JSONEncoder's bound `default` method using super()

(cherry picked from commit b06fb2022c)
2016-10-11 08:33:47 -07:00
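The shape of the final fix, as a self-contained sketch:

```python
import json

class ExampleEncoder(json.JSONEncoder):
    def default(self, o):
        # Defer to the *bound* base implementation so the json module raises
        # its standard "is not JSON serializable" TypeError for anything this
        # encoder doesn't handle itself.
        return super(ExampleEncoder, self).default(o)

json.dumps({"n": 1}, cls=ExampleEncoder)        # fine
# json.dumps(object(), cls=ExampleEncoder)      # -> TypeError from json
```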
Brian Coca 972379c907 fix method signature 2016-10-07 10:27:10 -04:00
James Cammarata 6db31bb4c6 Moves 'statically included' messages to -vv verbosity 2016-10-06 08:56:42 -05:00
James Cammarata a7d0cc6e61 Move searching for roles in the cur basedir to last
Searching the DEFAULT_ROLES_PATH and the roles basedir should come
before this, and it has been a long standing oversight.

Fixes #17882

(cherry picked from commit 0a86ddc251)
2016-10-05 01:26:00 -05:00
Brian Coca ea5e2d46ee fixed storing of cwd
(cherry picked from commit d9d7e413a5)
2016-10-04 14:26:23 -04:00
Brian Coca e72d1e995f fixed usage of incorrect exception class
fixes #17895
2016-10-04 11:52:28 -04:00
James Cammarata 2a234c1ff9 Check for substates in is_failed before checking main state failure
Fixes #17882

(cherry picked from commit d09f57fb3a)
2016-10-04 01:43:38 -05:00
Brian Coca 751865d68c only change dir to playdir if local
fixes #17869
fixes #17770

(cherry picked from commit 49ce0c8bac)
2016-10-03 15:56:28 -04:00
Matt Davis ec1f19ac3a bump core submodule ref 2016-10-03 08:32:23 -07:00
James Cammarata 29f2f26278 New release v2.1.2.0-1 2016-09-29 10:01:35 -05:00
James Cammarata 5f23d09e7c Removing mount from CHANGELOG due to reversion 2016-09-29 09:53:34 -05:00
Toshio Kuratomi b989004a0b Pick up core reversion of mount changes 2016-09-29 07:19:23 -07:00
James Cammarata fd256a79a1 Updating CHANGELOG for 2.1 2016-09-29 08:51:49 -05:00
James Cammarata c3257027b9 New release v2.1.2.0-0.5.rc5 2016-09-27 10:41:46 -05:00
Toshio Kuratomi 4c845b4ef0 Update submodule ref for fix to mount backport 2016-09-27 07:28:05 -07:00
James Cammarata 3d8dac2084 New release v2.1.2.0-0.4.rc4 2016-09-26 14:45:29 -05:00
Toshio Kuratomi 2b7761cd37 Update core to pick up more mount changes 2016-09-26 12:10:21 -07:00
Toshio Kuratomi 60a6c83abc Add ini_file whitespace change to the CHANGELOG 2016-09-26 07:05:13 -07:00
Toshio Kuratomi ee4ba3ceca Update submodule refs 2016-09-26 07:04:24 -07:00
Toshio Kuratomi 66ffe199e5 Disable mount tests for now. Mount is buggy on too many platforms 2016-09-23 16:37:49 -07:00
Toshio Kuratomi 5635670191 Update submodule refs for mount fixes 2016-09-23 14:53:31 -07:00
Toshio Kuratomi a4611ff509 Add tests for the mount module (#17718)
* Add tests for the mount module

* Switch from unmounted to absent...

the code for mounting always modifies fstab so we need to always modify
fstab to cleanup as well.

* Fix comments and copyright
2016-09-23 12:54:09 -07:00
Abhijit Menon-Sen cc71765d9e Use loop_control.loop_var directly
6eefc11c converted task.loop_control into an object, but while the other
callers were updated to use .loop_var instead of .get('loop_var'), this
site was overlooked.

This can be reproduced by including with loop_control a file that does
set_fact; a simple regression test along these lines is included.

(cherry picked from commit 950cc26aab)
2016-09-23 10:39:06 -05:00
Peter Sprygada b5b8a756fc fixes issue with net_template failing unless src argument provided. (#17726)
This fixes an issue where the net_template action will fail if a
non-required argument (src) is not provided.

fixes ansible/ansible-modules-core#4978
2016-09-23 09:24:30 -04:00
nitzmahone 4c9ed1f4fb bump extras submodule ref for win_chocolatey fix 2016-09-22 20:09:06 -07:00
Toshio Kuratomi e917a0bd7a Fix for cherry-pick e8dddc3679,
The cherry-pick allows module parameters to convert int to float, fixing
bugs where a float type is required.  However, it used types from the
six library for python3 compatibility as well.  Since we don't have
six in 2.1.0 this was buggy.  Merging the unittests for the problem
here: adc158a499 made this buggy behaviour
apparent.
2016-09-21 21:45:03 -07:00
Toshio Kuratomi 9de21e1eaa Update submodule refs 2016-09-21 20:41:52 -07:00
Toshio Kuratomi eaba2152f3 Remove _load_hosts() from Play initialization as it's no longer needed and it breaks using extra_vars defining a list for hosts (#17699)
Thanks to @jimi-c for the solution

Fixes #16583
2016-09-21 20:41:13 -07:00
Christoph adc158a499 Add a test for int/float parameter type checking (#16741)
A parameter of type int should accept int and string, but not float.
A parameter of type float should accept float, int, and string.

Also reset the arguments in another test so that it runs cleanly.  This
agrees with what all the other tests are doing.
2016-09-21 20:39:02 -07:00
James Cammarata 01439aafaf Create a raw lookup for hostvars that does not template the data
When using hostvars to get extra connection-specific vars for connection
plugins, use this raw lookup to avoid prematurely templating all of the
hostvar data (triggering unnecessary lookups).

Fixes #17024

(cherry picked from commit ac5ddf4aa092e12f9e1c85c6b74aa30b7ef0a382)
2016-09-21 13:16:58 -05:00
Matt Clay b7168d2ac8 Remove Travis config/badge and add Shippable. (#17680) 2016-09-20 17:35:57 -07:00
James Cammarata c004ae578d Take ITERATING_ALWAYS into account when setting failed state in PlayIterator
Fixes #15963

(cherry picked from commit 4dc2bf4815)
2016-09-20 11:31:23 -05:00
James Cammarata efe5bb122e Rework the way params are assigned to TaskIncludes when they're dynamic
Copying the TaskInclude task (which is the parent) before loading the blocks
makes the code much more simple and clean, and fixes a bug introduced during
the performance improvement changes (and specifically the change which moved
things to a single-parent model).

Fixes #17064

(cherry picked from commit f4237b2151)
2016-09-19 14:29:08 -05:00
René Moser 27c621950c cloudstack: fix has_changed dict values comparison (#17632)
In some rare situations, the CloudStack API returns strings for numbers
when we expect int.

With this fix, we ensure we compare the types expected.
2016-09-19 14:08:21 +02:00
Andrea Tartaglia 7bfa36eb8b Pass the absolute path to dirname when assigning basedir (#17457)
* Pass the absolute path to dirname when assigning basedir

If no path is specified when calling the playbook, os.path.dirname(playbook_path) returns ''
This will cause failure when creating the retry file.

Fixes #17456

* Updated to use os.path.dirname(os.path.abspath())
2016-09-16 08:20:47 -07:00
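The one-liner at the heart of this fix, shown standalone:

```python
import os

playbook_path = "site.yml"  # invoked with no directory component

# os.path.dirname("site.yml") == "", which broke retry-file creation;
# taking abspath first always yields a real directory.
basedir = os.path.dirname(os.path.abspath(playbook_path))
print(basedir)
```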
James Cammarata fb71a9dfd9 New release v2.1.2.0-0.3.rc3 2016-09-14 16:51:18 -05:00
Jay d86b2ec225 Fix issue with proxy setting leaving temp files (#17073)
When using the no_proxy option, the temp ca files that are created for
setting up an ssl context were not being deleted. 

This small patch fixes that issue by removing the temp file.
2016-09-13 16:15:48 -04:00
Ryan S. Brown 7ea342d8ec bump core submodule ref 2016-09-13 10:57:53 -04:00
Toshio Kuratomi 6ba8926889 Update submodule refs 2016-09-12 09:02:44 -07:00
nitzmahone 2e06f0b427 add win_user regression tests
Ensure https://github.com/ansible/ansible-modules-core/issues/4369 doesn't occur again, also adds tests for password set when expired.

(cherry picked from commit 69880f1640)
2016-09-11 20:47:40 -07:00
nitzmahone a900a0b4e3 bump core submodule ref 2016-09-11 20:47:31 -07:00
Monty Taylor c536ca399a Fix galaxy import command for 2.1 (#17417)
The g_connect decorator is required on create_import_task or else
self.baseurl is None. This is fixed in devel already.
2016-09-09 18:04:09 -04:00
Matt Clay 2587d2aaf9 Add partially backwards compatible version of _fixup_perms. (#17427)
Also added a deprecation notice for _fixup_perms.

Resolves issue #17352 (assumes custom actions use recursive=False).

(cherry picked from commit 94a0d2afb4)
2016-09-06 16:56:47 -07:00
James Cammarata e83840c3fd New release v2.1.2.0-0.2.rc2 2016-08-31 12:57:52 -05:00
Toshio Kuratomi e8dddc3679 For module parameter checking, allow conversion of int to float (#17325)
Fixes https://github.com/ansible/ansible-modules-core/issues/4084
2016-08-31 09:01:03 -07:00
James Cammarata da3fd2d588 Several fixes for includes
* when including statically, make sure that all parents were also included
  statically (issue #16990)
* properly resolve nested static include paths
* print a message when a file is statically included

Fixes #16990

(cherry picked from commit 1c7e0c73c9)
2016-08-31 00:09:33 -05:00
Will Thames c9b212c5bd task_result _check_key should handle empty results (#16766)
When a task result has an empty results list, the
list should be ignored when determining the results
of `_check_key`. Here the empty list is treated the
same as a non-existent list.

This fixes a bug that manifests itself with squashed
items - namely the task result contains the correct
value for the key, but an empty results list. The
empty results list was treated as zero failures
when deciding which handler to call - so the task
shows as a success in the output, but is deemed to
have failed when deciding whether to continue.

This also demonstrates a mismatch between task
result processing and play iteration.

A test is also added for this case, but it would not
have caught the bug - because the bug is really in
the display, and not the success/failure of the
task (visually the test is more accurate).

Fixes ansible/ansible-modules-core#4214
(cherry picked from commit eb2a3a91a8)
2016-08-29 08:40:38 -07:00
Michael Scherer 478283f571 Do not convert Nonetype to "None" (#17261)
If someone uses a task with an empty name like this:

  - name:
    command: true

This will result in displaying 'None' as a task name instead of
'command'.
2016-08-26 09:53:29 -07:00
Remi Ferrand 5035b8a8bf Properly template task names in free strategy
* Fixes #16295
2016-08-26 09:53:20 -07:00
Toshio Kuratomi 559fcbe531 Add fetch fix to changelog 2016-08-25 21:54:10 -07:00
Toshio Kuratomi 0cce86cac9 Fix fetch idempotence (#17255)
Fetch always follows symlinks when downloading so it needs to always
follow symlinks when getting the checksum of the file as well.
2016-08-25 21:53:20 -07:00
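A sketch of checksumming the link target rather than the link, which is the behavior this fix aligns on; the helper name is illustrative:

```python
import hashlib
import os

def checksum_of_target(path):
    # fetch follows symlinks when downloading, so for idempotence the
    # checksum must be taken of the link target, not of the link itself.
    with open(os.path.realpath(path), "rb") as f:
        return hashlib.sha1(f.read()).hexdigest()
```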
James Cammarata f80c981ef6 Clean up PlaybookExecutor logic for batches and errors
The calculation for max_fail_percentage was moved into the linear
strategy a while back, and works better there in the strategy layer
rather than at the PBE layer. This patch removes it from the PBE layer
and tweaks the logic controlling whether or not the next batch is run.

Fixes #15954

(cherry picked from commit 890e096b2b)
2016-08-25 12:28:08 -05:00
Ansible Test Runner fc3efdb057 Changelog entry for https://github.com/ansible/ansible/pull/17225 2016-08-24 11:04:10 -07:00
Toshio Kuratomi 7d6df50e0e The former match for a section header included hosts that began with a range. (#17225)
Checking that the line ends with "]" narrows that window somewhat,

Fixes #15331
2016-08-24 10:59:32 -07:00
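In the spirit of the fix (the exact pattern here is illustrative, not the real parser's): a section header must also end with "]", so a host beginning with a range no longer looks like one:

```python
import re

SECTION_RE = re.compile(r"^\[([^\]]+)\]\s*$")

print(bool(SECTION_RE.match("[webservers]")))         # True: a real section
print(bool(SECTION_RE.match("[01:10].example.com")))  # False: a ranged host
```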
James Cammarata b2fe1b39df New release v2.1.2.0-0.1.rc1 2016-08-22 16:15:56 -05:00
Toshio Kuratomi 719c73afa2 Fix tmpfile misspelled as tmplfile (#17183) 2016-08-22 11:32:40 -07:00
James Cammarata bf929ac532 Backport of f722d41 to the stable-2.1 branch
Related to #15915
2016-08-22 10:21:45 -05:00
nitzmahone 9478e41394 bump extras submodule ref for another win_chocolatey fix 2016-08-17 16:41:00 -07:00
nitzmahone 29dd1635fe bump submodule refs 2016-08-17 15:51:09 -07:00
James Cammarata ea7a038f6f Don't use an unset playbook basedir when searching for hostgroup vars
The flag new_pb_basedir is not being utilized in Inventory._get_hostgroup_vars,
leading to the situation where an inventory with no playbook basedir set will
read host/group vars from the $CWD, regardless of the inventory and/or playbook
relative location. This patch corrects that by not using the playbook basedir
if it is unset (None).

This patch also corrects a bug in which the VariableManager would accumulate
host/group vars files, which could lead to incorrect vars files being used when
playbooks are run from different directories containing their own group/host vars
directories.

Fixes #16953

(cherry picked from commit b617d62203)
2016-08-17 16:29:13 -05:00
Brian Coca cf0eb42ad5 fixed tests to account for new parameter
(cherry picked from commit b1410fa278)
2016-08-17 10:58:49 -04:00
James Cammarata 77fe1ac7af Catch a missing filename in include_vars
Fixes ansible/ansible-modules-core#4445
2016-08-17 09:30:14 -05:00
Nathaniel Case 8312df1512 Relicense netcfg.py to BSD (#17110)
As with #17025. The caveat regarding machilde's (now obsoleted) commit has been addressed, so netcfg.py is able to be easily relicensed.
2016-08-16 16:35:07 -04:00
Brian Coca 2de8d2ece7 make parsed param private and explicit (#17104)
* make parsed param private and explicit

* fixed missed parsed

(cherry picked from commit 5fe9d3c3d5)
2016-08-16 11:59:47 -04:00
David Shrewsbury d35377dac7 Fix async logic when parsing fails (#17091)
We want to NOT consider the async task as failed if the result is
not parsed, which was the intent of:

  https://github.com/ansible/ansible/pull/16458

However, the logic doesn't actually do that because we default
the 'parsed' value to True. It should default to False so that
we continue waiting, as intended.
(cherry picked from commit bf8c871801)
2016-08-15 13:37:26 -04:00
Brian Coca 616a51ac00 updated submodule refs 2016-08-12 10:48:24 -04:00
Jim Ladd 3749d44cd5 Increase local version for unofficial rpms (#17045) 2016-08-11 17:36:37 -07:00
nitzmahone 1601e24593 bump core submodule ref to pick up reverted unarchive change 2016-08-11 12:29:30 -07:00
nitzmahone 69d66727ca update core submodule ref 2016-08-11 11:29:01 -07:00
Nathaniel Case 6fe13bbb47 Relicense low-hanging fruit to BSD (#17025) 2016-08-10 10:49:27 -07:00
Toshio Kuratomi e71cce7776 Add sync: poll fix to changelog 2016-08-08 11:16:59 -07:00
Toshio Kuratomi f695dd6892 Fix from @jimi-c to use task_vars when polling async tasks. (#17003)
This is needed so that async_status can use ansible_python_interpreter
for the host being contacted.

Fixes #14101
(cherry picked from commit 38ccd11cce)
2016-08-08 11:07:32 -07:00
Brian Coca 9255a618e3 set cwd to task's basedir (#16805)
* switch cwd to basedir of task

This restores previous behaviour in pre 2.0 and allows for 'local type' plugins
and actions to have a more predictable relative path.

fixes #14489

* removed FIXME since prev commit 'fixes' this

* fix tests, now they need a loader (thanks jimi!)

(cherry picked from commit e2f17f8d9b)
2016-08-08 13:14:00 -04:00
Matt Clay cf9ef724e9 Use file list, not recursion, in _fixup_perms. (#16924)
Run setfacl/chown/chmod on each temp dir and file.

This fixes temp file permissions handling on platforms such as FreeBSD
which always return success when using find -exec. This is done by
eliminating the use of find when setting up temp files and directories.

(cherry picked from commit 72cca01cd4)
2016-08-05 18:50:31 -07:00
James Cammarata f956ff9619 Tweak the way the debug strategy imports the linear strategy parent
Due to the way we load plugins, internally to Python there can be issues when
the debug strategy is loaded after the linear strategy. To work around this,
we're changing the import line for the linear strategy to avoid the problem.

Related to #16825

(cherry picked from commit 1714279b5e)
2016-08-05 10:08:38 -05:00
Toshio Kuratomi 0000b76a0a YAML treats some unquoted strings as booleans. For instance, (#16961)
uri:
    follow_redirects: no

Will lead yaml to set follow_redirects=False.  This is problematic when
the module parameter is not a boolean value but a string.  For instance:

  follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']),

Our parameter validation code ends up getting follow_redirects="False"
instead of "no".  The 100% fix is for the user to quote their strings in
playbooks like:
  uri:
    follow_redirects: "no"

But we can fix quite a few common cases by trying to switch "False" back
into the string that it was specified as.  We only do this if there is
only one correct choices value that could have been specified.  In the
follow_redirects example, a value of "True" only maps back to "yes" and
a value of "False" only maps back to "no" so we can do this.  If choices
also contained "on" and "off" then we couldn't map back safely and would
need to force the module author to change the module to handle this
case.

Fixes parts of the following PRs:

* https://github.com/ansible/ansible-modules-core/pull/4220
* https://github.com/ansible/ansible-modules-extras/pull/2593
(cherry picked from commit 6db6edfc4f)
2016-08-05 06:51:18 -07:00
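A condensed sketch of the mapping just described; the truthy/falsy spellings here are assumptions, not Ansible's exact lists:

```python
BOOLEANS_TRUE = ("yes", "on", "true", "1")
BOOLEANS_FALSE = ("no", "off", "false", "0")

def recover_choice(value, choices):
    # If YAML turned an unquoted `no` into False, map it back -- but only
    # when exactly one of the declared choices could have produced it.
    if value is True:
        overlap = [c for c in choices if str(c).lower() in BOOLEANS_TRUE]
    elif value is False:
        overlap = [c for c in choices if str(c).lower() in BOOLEANS_FALSE]
    else:
        return value
    return overlap[0] if len(overlap) == 1 else value

print(recover_choice(False, ["all", "safe", "none", "yes", "no"]))  # -> no
```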
Toshio Kuratomi 7bd9128848 * Fix race in creating temp directories pre-fork (#16965)
* These can still race when multiple ansible processes are created at
    the same time.
* Reverse order of expanduser and expandvars in unfrakpath(). So that
  tildes in environment variables will be handled.
(cherry picked from commit 1ecf51d87e)
2016-08-04 16:43:22 -07:00
Toshio Kuratomi 9f645cdbdb Add the PID of the Ansible process to local_tmp directory. (#16589)
This aids in associating a leftover cachedir with a previous run of
Ansible.  Came about because of #16489
(cherry picked from commit 3f4027f7b4)
2016-08-04 16:40:40 -07:00
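An illustration of the idea (the directory prefix is hypothetical, not the real local_tmp naming):

```python
import os
import tempfile

# Embedding the controller PID in the temp directory name lets a leftover
# directory be traced back to the run that created it.
tmpdir = tempfile.mkdtemp(prefix="ansible-local-%d" % os.getpid())
print(tmpdir)  # e.g. /tmp/ansible-local-12345xyz
```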
Matt Davis baaa1d3013 fix for unspecified retries on until + test (#16963)
fixes #16907
(cherry picked from commit 746ea64d30)
2016-08-04 19:31:49 -04:00
ovcharenko e464237894 Fix for issue @synchronize doesn't substitute variables properly #16347 (#16349)
* Fix for issue @synchronize doesn't substitute variables properly #16347
2016-08-04 11:30:43 -07:00
victoru 08ae111757 raise AnsibleError in hashi_vault lookup plugin when hvac module is not installed (#16859)
(cherry picked from commit 14901b65d9)
2016-08-04 10:06:42 -07:00
Brian Coca df16e37ad7 Implicit localhost ondemand (#16900)
* Revert "There can be only one localhost"

This reverts commit 5f1bbb4fcd.
this broke several usages of localhost, see #16882, #16898 and #16886

* ensure there is only 1 localhost

fixes #16886, #16882 and #16898

- make sure localhost exists before returning it
- optimized host caching
- ensure we always return a host object

(cherry picked from commit f7570f1dc4)
2016-08-02 10:39:37 -04:00
Lukas Pirl db4e661fef (re)allow ansible_python_interpreter to contain more than 1 arg (#16247) 2016-07-28 13:55:02 -07:00
Andrew Gaffney 167a12003d Fix quoting of args for old-style modules
This removes the extra layer of quotes around values in the 'args' file.
These quotes were there before the pipes.quote() call was added, but
were not removed, resulting in too much quoting.
2016-07-28 13:55:02 -07:00
elotje 3b6b4f6ce4 Unprivileged become on HP-UX/UNIX (Fixes #16249) (#16275)
Problem: When setting the file permissions on the remote server for
unprivileged users ansible expects that a chown will fail for unprivileged
users. For some systems (e.g. HP-UX) this is not the case.

Solution: Change the order in which ansible sets the remote permissions.
* If the remote_user sudo's to an unprivileged user then we attempt to
  grant the unprivileged user access via file system acls.
* If granting file system acls fails we try to change the owner of the
  file with chown which only works in case the remote_user is privileged
  or the remote systems allows chown calls by unprivileged users (e.g.
  HP-UX)
* If the chown fails we can set the file to be world readable so that
  the second unprivileged user can read the file. Since this could allow
  other users to get access to private information, we only do this if
  ansible is configured with "allow_world_readable_tmpfiles" in the
  ansible.cfg
2016-07-28 13:55:02 -07:00
James Cammarata 780c363482 New release v2.1.1.0-1 2016-07-28 10:32:34 -07:00
James Cammarata 32b6114bef Updating CHANGELOG for 2.1.1 2016-07-28 10:17:27 -07:00
James Cammarata 02389d6c51 New release v2.1.1.0-0.5.rc5 2016-07-24 18:06:49 -05:00
Alex Mirski-Fitton 044547034d Make returning results after loading vars optional
Reinstates some functionality removed by commit 0ba9a6a but
makes the expensive operation optional (and default to off)

(cherry picked from commit 7dc09adaf4)
2016-07-24 11:51:57 -05:00
James Cammarata 6ca9b406ec New release v2.1.1.0-0.4.rc4 2016-07-23 16:59:25 -04:00
nitzmahone 3ec6e95e47 bump core submodule ref 2016-07-22 10:00:02 -07:00
Toshio Kuratomi 53c348c89d Only show the traceback for importing cryptography when in Ansible Debug. (#16795) 2016-07-22 05:41:45 -07:00
Toshio Kuratomi 3e964dbfae Update submodules 2016-07-21 11:11:44 -07:00
Connor Osborn 505a1de605 Fix exceptions thrown from cryptography import (#16723)
A simple import of cryptography can throw several types of errors. For example,
if `setuptools` is less than cryptography's minimum requirement of 11.3, then
this import of cryptography will throw a VersionConflict here. An earlier case
threw a DistributionNotFound exception.

An optional dependency should not stop ansible. If the error is more than
an ImportError, log a warning, so that errors can be fixed in ansible or
elsewhere.
2016-07-21 05:30:42 -07:00
James Cammarata ed959d72f1 Fix bug where getting role vars does not follow the dep chain
This bug was introduced in 3ced6d3, where getting vars from a role
did not follow the dep chain. This was originally hidden by the fact
that we got vars twice (from the block and from the roles directly).

Fixes #16729

(cherry picked from commit d8a3feb976)
2016-07-19 17:02:01 -04:00
James Cammarata 35da6ba9d1 New release v2.1.1.0-0.3.rc3 2016-07-18 16:12:07 -04:00
Toshio Kuratomi cf6f46683a Update submodule ref 2016-07-15 06:48:15 -07:00
jctanner b7479a1dc6 Add a function to check for killed processes in all strategies (#16684)
* Add a function to check for killed processes so that if any
threads are sigkilled or sigtermed, the entire playbook execution is aborted.

(cherry picked from commit 238c6461f6)
2016-07-14 17:31:09 -04:00
Adrian Likins fb6e58e888 Update submodule refs 2016-07-14 15:20:54 -04:00
Toshio Kuratomi 87dcec9fc5 Update submodule refs 2016-07-13 10:34:52 -07:00
James Cammarata 137385059c Fixing typo in 293723f (mock_handler -> mock_handler_task) 2016-07-12 16:59:30 -05:00
James Cammarata 293723f4f6 Fix unit test for base strategy in regards to handler changes 2016-07-12 16:49:23 -05:00
James Cammarata bac0028350 Fix unreachable host/any_errors_fatal bug in linear strategy
2e003adb added the ability for tasks using any_errors_fatal to fail
when there were unreachable hosts. However that patch used the running
unreachable hosts data rather than the results from the current task,
which causes failures when any run_once or BYPASS_HOST_LOOP task is hit
after an unreachable host causes a failure. This patch corrects that by
using the current set of results to determine if any hosts were
unreachable during the last task only.

Fixes ansible/ansible-modules-core#4160

(cherry picked from commit 245ce9461d)
2016-07-12 03:06:38 -05:00
nitzmahone ad3128e661 Revert "tkuratomi bugfix for BOMs in powershell modules"
This reverts commit 2398ca917f.
2016-07-11 13:23:17 -07:00
nitzmahone e7690b0dd1 Revert "update action unit test for powershell shebang behavior"
This reverts commit 637f6f23e1.
2016-07-11 13:23:04 -07:00
nitzmahone 637f6f23e1 update action unit test for powershell shebang behavior
(cherry picked from commit de549ad675)
2016-07-11 12:38:53 -07:00
nitzmahone 2398ca917f tkuratomi bugfix for BOMs in powershell modules
fixes #15998

(cherry picked from commit 31e963dd2a)
2016-07-11 11:52:15 -07:00
Andrew Gaffney 628a67563f Fix typo on -K/--ask-become-pass option in 'ansible' man page (#16667) 2016-07-11 00:18:02 +02:00
Chris Houseknecht 909d629c73 Upgrade to azure 2.0.0rc5 and add locations parameter.
(cherry picked from commit 7964a2fca3)
2016-07-09 01:59:23 -04:00
= c6faf106f8 fix win_msi tests by setting wait: true on all win_msi tasks.
I suspect this problem was masked previously as older versions of pywinrm
were significantly slower, allowing more time for the windows installer service to complete.
2016-07-08 13:36:06 -07:00
James Cammarata eff49c968a New release v2.1.1.0-0.2.rc2 2016-07-07 15:25:28 -05:00
James Cammarata 4b86191a24 Updating integration tests for async testing
(cherry picked from commit cd4412016a)
2016-07-07 14:18:26 -05:00
James Cammarata 1f74df5c91 Updating submodule refs 2016-07-07 13:50:47 -05:00
nitzmahone 9eb32357de bump extras submodule ref 2016-07-07 10:58:40 -07:00
James Cammarata 56737da097 Updating submodule refs 2016-07-07 11:32:12 -05:00
Brian Coca 11eefdc486 correctly set become value in console
fixes #16614

(cherry picked from commit 906dc99c64)
2016-07-07 10:43:19 -04:00
René Moser 5b9f795140 cloudstack: handle unicode API results in has_changed (#16601)
* cloudstack: handle unicode API results in has_changed

* cloudstack: add more case sensitve keys
2016-07-07 11:01:30 +02:00
James Cammarata 8a2f54bcee Force remote_user to be the current user for connection=local
Fixes ansible-modules-core#4092

(cherry picked from commit 9248cde239)
2016-07-06 23:10:47 -05:00
Toshio Kuratomi 4dfc0edbc1 Update submodule for unarchive fixes 2016-07-06 16:24:04 -07:00
Toshio Kuratomi ef5a504b8b Update submodule refs 2016-07-06 14:43:29 -07:00
James Cammarata 68232d10cf Fix unit tests for handler stuff in base strategy test 2016-07-06 16:40:13 -05:00
Monty Taylor e5235e151d Don't treat parsing problems as async task timeout (#16458)
* Don't treat parsing problems as async task timeout

If there is a problem reading/writing the status file that manifests as
not being able to parse the data, that doesn't mean the task timed out,
it means there was likely a temporary problem. Move on and
keep polling for success. The only things that should cause the async
status to not be parseable are bugs in the async_runner.

* Add comment explaining not bailing out of loop

* Return different error when result is unparseable

* Remove extraneous else
2016-07-06 14:35:18 -07:00
James Cammarata feebe73ede Fix the way handlers are compiled and found/notified
* Instead of rebuilding the handler list all over the place, we now
  compile the handlers at the point the play is post-validated so that
  the view of the play in the PlayIterator contains the definitive list
* Assign the dep_chain to the handlers as they're compiling, just as we
  do for regular tasks

(cherry picked from commit 930d090507)
2016-07-06 15:46:43 -05:00
Michael Scherer ff601f4161 Port azure_rm_common.py to py3 syntax (#15880)
Since the rest of the file already use a non 2.4 syntax
(such as format), I didn't bother using the 2.4 syntax for
exceptions.
(cherry picked from commit eb52dc9af0)
2016-07-05 16:59:34 -07:00
Chris Houseknecht 6b84306f70 Fix authentication via params and env vars. Update guide to RC5.
(cherry picked from commit c71a939b08)
2016-07-05 16:53:23 -07:00
chouseknecht 5e04dcf026 Update and pin to azure-2.0.0rc5
(cherry picked from commit a076612b63)
2016-07-05 16:52:39 -07:00
nitzmahone 16c3fc5cec prevent spurious pywinrm arg warnings for non-pywinrm connection args
(cherry picked from commit c5e0d3d17b)
2016-07-05 16:05:42 -07:00
Brad Davidson 75a3526680 Ignore limits and restrictions when parsing inventory.
We want to update host vars for all hosts (even those that might
have failed), and the in case of a refresh_inventory, the code has
a stale restrictions list at this point anyway.

(cherry picked from commit 0c4effb8a6)
2016-07-05 16:02:13 -05:00
Robin Roth 5982a0632f Fix git shallow update (#16224)
* add git shallow fetch test

covers https://github.com/ansible/ansible-modules-core/issues/3782

updating a repo with depth=1 fails silently if version==HEAD

* raise git version support supporting depth to 1.9.1

(cherry picked from commit d0ccedc617)
2016-07-05 17:00:20 -04:00
James Tanner 18ea3f1178 update core submodule reference 2016-07-05 16:20:43 -04:00
Martin Matuska fbf8f5bccb Determine failed hosts with _check_failed_state() (#16566)
(cherry picked from commit 8bff6154a6)
2016-07-05 10:08:48 -05:00
James Cammarata 6bcdb575e8 Fix the way we pull localhosts out of inventory for delegate_to
This patch corrects the way we look in the inventory hosts list for
implicit localhost entries when localhost aliases are used.

Fixes #16568

(cherry picked from commit 83e4a4048b)
2016-07-04 11:26:11 -05:00
Matt Clay 8659f255df Fix test_async. (#16552)
* Conditionally run test_async in docker containers.
* Revise test_async test.

(cherry picked from commit 4e369a31db)
2016-07-01 18:01:16 -07:00
Matt Clay 6755e9c848 Run test_async after test_connection. 2016-07-01 16:06:13 -07:00
Matt Clay 75fa80f73c Parse async response in async action. (#16534)
* Parse async response in async action.
* Add async test for non-JSON data before module output.
* Fix existing async unit test.

Resolves #16156

(cherry picked from commit 292785ff2b)
2016-07-01 15:24:41 -07:00
Matt Clay a598f26006 Update shippable shared dir for COPY_SOURCE. (#16511)
Tests now use '/shared' instead of '/tmp/shared-dir' when using
COPY_SOURCE. This avoids issues with containers purging '/tmp'.

(cherry picked from commit fbfadc47c7)
2016-06-29 15:11:32 -07:00
James Cammarata 3fd94769dc Save and restore the loader's basedir when loading playbooks
Fixes #15930
2016-06-29 12:52:52 -04:00
Toshio Kuratomi 9e5fc8f08f In some corner cases, sys.executable is empty. Work around that by choosing a sensible default path to python and emitting a warning (#16487)
Fixes #13585

Fix grammar of warning method
2016-06-29 08:51:45 -07:00
Peter Sprygada e826d3c7d7 Merge pull request #16496 from privateip/stable-2.1
fixes minor issue with expanding blocks in netcfg
2016-06-29 06:48:33 -07:00
Peter Sprygada b70d83fe1d fixes minor issue with expanding blocks in netcfg
This fixes a minor bug where blocks in netcfg were not being expanded
when replace=block was specified.
2016-06-29 06:30:00 -07:00
Matt Clay 3489dcfd94 Update shippable notifications to match devel. 2016-06-28 16:32:07 -07:00
Peter Sprygada ba97d6069a Merge pull request #16483 from privateip/bugfix-netcfg
fixes dependency on OrderedDict in netcfg
2016-06-28 16:05:49 -07:00
Peter Sprygada c80d8b97e8 fixes dependency on OrderedDict in netcfg
This removes the need for OrderedDict, which makes the update
compatible with Python versions < 2.7

fixes ansible/ansible-modules-core#3979
2016-06-28 15:36:41 -07:00
James Cammarata dd15a15edc Don't modify the original task ds when preprocessing data
In the case of using YAML anchors/aliases, YAML actually uses references
to the duplicated object, so any modifications to the original impact
later uses of the object.

Fixes #13575

(cherry picked from commit af249b83e6)
2016-06-28 03:09:36 -05:00
Pilou 57eb60757c Lookup password omit salt (#16361)
* Lookup unencrypted password must not include salt
* Integration test lookup: remove previous directory
* Test that lookup password doesn't return salt
* Lookup password: test behavior with empty encrypt parameter

Closes #16189

(cherry picked from commit b361bf90d7)
2016-06-27 10:47:33 -07:00
Toshio Kuratomi 25c5314f2e Update submodule refs 2016-06-24 09:02:38 -07:00
James Cammarata 084a3e13b0 Fix off-by-one error with retries
Fixes #16408

(cherry picked from commit 948682dbe2)
2016-06-23 18:08:00 -05:00
James Cammarata eeba5e2c3e Remove unnecessary role initialization and lookup stuff
* Remove unnecessary copying of values from parents to role deps, as
  this can cause problems when roles have multiple parents (or the same
  parents with different params specified through deps)
* Since we're already checking the dep chain in the block for role
  things (which every task in a role should have), it is not necessary
  to check the role directly in case it improperly grabs something

Fixes #14438

(cherry picked from commit 3e4755f7e4)
2016-06-23 17:02:14 -05:00
Bill Nottingham e877ea7f2b Update intro_configuration.rst (#16416)
Fix callback plugin path (per lib/ansible/constants.py)
2016-06-23 11:07:10 -04:00
James Cammarata 7da2265e10 Properly wrap objects using json default encoder
Our custom encoder for the to_json filter was simply returning the
object if it was not a HostVars object, leading in some cases to a
TypeError when the data contained an undefined variable. This lead
to an odd error message being propagated up, so we now properly catch
this as an undefined variable error.

Fixes #15610

(cherry picked from commit c24c0f5f6b)
2016-06-23 09:20:35 -05:00
ivovangeel cb520bd86a Fixed bug in find_mount_point function
The find_mount_point function does not correctly resolve the mount point of paths containing a soft-link and returns the wrong mount-point.

I have mounted an NFS filesystem on /nfs-mount. This directory contains a directory called "directory". I also created a soft-link to this last directory: /soft-link-to-directory -> /nfs-mount/directory. I created the following task to copy a file into /soft-link-to-directory:

    - name: copy file to nfs-mount
      copy:
        src: "file"
        dest: "/soft-link-to-directory/file"

This throws an exception:

invalid selinux context: [Errno 95] Operation not supported

This is caused by the find_mount_point function returning '/' as the mount point for '/soft-link-to-directory/file'. This should have been /nfs-mount. Because find_mount_point returns the wrong mount-point, the is_special_selinux_path function does not recognise that the file is on an NFS mount and tries to set the default SELinux context (system_u:object_r:default_t:s0), which fails. The context should have been: system_u:object_r:nfs_t:s0

Full Ansible output:

TASK [copy file to nfs-mount] **************************************************
fatal: [hostname]: FAILED! => {"changed": false, "checksum": "f34b60930a5d6d689cf49a4c16bd7f9806be608c", "cur_context": ["system_u", "object_r", "nfs_t", "s0"], "failed": true, "gid": 24170, "group": "foundation", "input_was": ["system_u", "object_r", "default_t", "s0"], "mode": "0644", "msg": "invalid selinux context: [Errno 95] Operation not supported", "new_context": ["system_u", "object_r", "default_t", "s0"], "owner": "root", "path": "/soft-link-to-directory/.ansible_tmpWCT6Z4file", "secontext": "system_u:object_r:nfs_t:s0", "size": 37, "state": "file", "uid": 0}
(cherry picked from commit 23349911f1)
2016-06-22 22:39:30 -05:00
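A sketch of the corrected lookup: resolve symlinks before walking up to the mount point:

```python
import os

def find_mount_point(path):
    # Resolve symlinks first; otherwise the walk starts from the link's own
    # location and can report the wrong filesystem, as described above.
    path = os.path.realpath(os.path.expanduser(path))
    while not os.path.ismount(path):
        path = os.path.dirname(path)
    return path
```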
nitzmahone 62b3909ef5 bump extras submodule ref 2016-06-22 17:20:30 -07:00
nitzmahone 5f1d50d53d bump core submodule ref 2016-06-22 11:52:53 -07:00
James Cammarata a7199abb74 Take previously failed/unreachable hosts into account when checking the batch
Again, as we're carrying failed/unreachable hosts forward from play to play via
internal structures, we need to remember which ones had previously failed so that
unrelated host failures don't inflate the numbers for a given serial batch in the
PlaybookExecutor causing a premature exit.

Fixes #16364

(cherry picked from commit 4c1601e9f2)
2016-06-22 08:33:08 -05:00
Toshio Kuratomi a7f93be2b6 Use find -exec + so that failures will be visible in find's exit code (#16389)
Fixes #16385
2016-06-22 05:44:35 -07:00
nitzmahone 05536b3be5 bump extras submodule ref 2016-06-20 13:04:01 -07:00
James Cammarata 4d4bbcbb33 New release v2.1.1.0-0.1.rc1 2016-06-17 14:33:25 -05:00
nitzmahone bb43d4d00b fix win_setup integration test to match fact name
(cherry picked from commit a45e842ba1c030d9022bf2e23ca7ad5002a2019c)
2016-06-17 12:29:02 -07:00
Ryan S. Brown d6fef6f9a6 Update ansible-modules-core ref
Includes cherry-pick of [ansible-modules-core#3347](https://github.com/ansible/ansible-modules-core/pull/3347)
2016-06-17 15:15:22 -04:00
Toshio Kuratomi 7083d0b5ea Add setfacl on BSD fix to CHANGELOG 2016-06-17 11:51:33 -07:00
Toshio Kuratomi cd348abaa6 Update core submodule ref for wait_for fix 2016-06-17 10:17:21 -07:00
Toshio Kuratomi 9d91d2c6b8 Use find instead of -R for recursive setfacl (#16340)
(-R is not present on Solaris or FreeBSD setfacl)

Fixes #16322
2016-06-17 09:40:53 -07:00
Mark Szymanski 552172b141 Allowing changed colour to be set properly from ansible.cfg (#16338)
(cherry picked from commit 2b0d0d0323)
2016-06-17 12:28:05 -04:00
nitzmahone 55f2829e0c bump core submodule ref 2016-06-16 13:16:01 -07:00
nitzmahone 8ec11e9d6c bump extras submodule ref 2016-06-16 12:38:12 -07:00
= 5d03a65ee8 added further tests for win_regedit to cover the changes made
under https://github.com/ansible/ansible-modules-extras/pull/2436
2016-06-16 12:37:14 -07:00
nitzmahone 8223b83758 bump submodule refs 2016-06-15 14:50:02 -07:00
Colin Hoglund 877347ad20 allow spaces before the filter string (#16312)
(cherry picked from commit 5a2b34e159)
2016-06-15 14:17:56 -04:00
Toshio Kuratomi 8fc5a6a6f5 Update core submodule ref 2016-06-15 10:34:34 -07:00
Toshio Kuratomi 9a00b2d4af Check that required arguments src and dest are present.
Fixes #16301
2016-06-15 09:58:25 -07:00
Brian Coca d0cf725ef8 updated submodule refs 2016-06-15 11:26:32 -04:00
Brian Coca d88264ee3b add executable if none exists when become
this is needed as become adds commands that require a shell

(cherry picked from commit 4acac55d23)
2016-06-15 11:26:32 -04:00
Adrian Likins 969c6a4443 zone plugin updates for newer api (#16289)
Fixes errors on import.
2016-06-15 07:39:33 -07:00
Toshio Kuratomi 7248314233 Make sure we don't end up with an empty PYTHONPATH (#16240)
When the PYTHONPATH is an empty string python will treat it as though
the cwd is in the PYTHONPATH.  This can be undesirable.  So make sure we
delete PYTHONPATH from the environment altogether in this case.

Fixes #16195
2016-06-15 07:07:11 -07:00
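The fix in miniature:

```python
import os

env = os.environ.copy()

# An empty (but present) PYTHONPATH makes Python treat the cwd as
# importable; deleting the variable outright avoids that.
if env.get("PYTHONPATH") == "":
    del env["PYTHONPATH"]
```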
jctanner 73a3a5839b Force an include to be a static task if no vars or loops are being used. (#16192)
Fixes #15735
2016-06-15 00:54:09 -05:00
James Cammarata 43d1ea0cfc Track notified handlers by object rather than simply their name
Due to the fact that roles may be instantiated with different sets of
params (multiple inclusions of the same role or via role dependencies),
simply tracking notified handlers by name does not work. This patch
changes the way we track handler notifications by using the handler
object itself instead of just the name, allowing for multiple internal
instances. Normally this would be bad, but we also modify the way we
search for handlers by first looking at the notifying tasks dependency
chain (ensuring that roles find their own handlers first) and then at
the main list of handlers, using the first match it finds.

This patch also modifies the way we setup the internal list of handlers,
which should allow us to correctly identify if a notified handler exists
more easily.

Fixes #15084
2016-06-14 22:34:51 -05:00
jctanner f6727aff39 Add further commentary about synchronize process_remote. (#16292)
Addresses #16284
(cherry picked from commit dc9b53a6aa)
2016-06-14 22:30:14 -05:00
Sergei Smolianinov 304493e795 Fix synchronize module incorrect remote host processing. (#15993)
Fixes  #15539
(cherry picked from commit 9753cac530)
2016-06-14 22:29:59 -05:00
Toshio Kuratomi 75beca22a5 Be more lenient of symlinked /bin/sh inside the chroot (#16239)
Symlinks inside of the chroot were failing because we weren't able to
    determine if they were pointing to a real file or not.  We could write
    some complicated code to walk the symlink path taking into account where
    the root of the tree is but that could be fragile.  Since this is just
    a sanity check, instead we just assume that the chroot is fine if we
    find that /bin/sh in the chroot is a symlink.  Can revisit if it turns
    out that many chroots have a /bin/sh that's a broken symlink.

    Fixes #16097
2016-06-14 12:05:31 -07:00
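The lenient check as a sketch (the function name is illustrative):

```python
import os

def chroot_sanity_check(chroot):
    sh = os.path.join(chroot, "bin", "sh")
    # A symlink target can't be validated from outside the chroot, so a
    # symlinked /bin/sh is assumed fine rather than rejected.
    return os.path.islink(sh) or os.path.exists(sh)
```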
nitzmahone fc8d8b5cde bump core submodule ref for windows setup fix 2016-06-14 11:35:57 -07:00
Brian Coca 97a9eaa299 ignore closing file error when file not open
fixes #16268

(cherry picked from commit 07846f7bec)
2016-06-14 09:34:57 -04:00
Peter Sprygada 6e2651ce07 fixes using ssh keyfile with junos network module
The junos network module will now properly use the ssh key file if it's
passed from the playbook to authenticate to the remote device.  Prior
to this commit, the ssh keyfile was ignored.
2016-06-13 22:50:49 -04:00
Toshio Kuratomi 9cbb1a196b Workaround bad interaction with .pth files. (#16238)
When setuptools installs a python module (as is done via python setup.py
install)  It puts the module into a subdirectory of site-packages and
then creates an entry in easy-install.pth to load that directory.  This
makes it difficult for Ansiballz to function correctly as the .pth file
overrides the sys.path that the wrapper constructs.  Using
sitecustomize.py fixes this because sitecustomize overrides the
directories handled in .pth files.

Fixes #16187
2016-06-13 08:00:59 -07:00
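Why sitecustomize helps, sketched as a hypothetical sitecustomize.py (the path is made up): Python's site module imports sitecustomize after processing .pth files, so the wrapper's intended sys.path can be re-asserted there:

```python
import sys

# Runs after easy-install.pth entries have been added, so this wins.
WRAPPER_DIR = "/tmp/ansible-wrapper-example"  # hypothetical
if WRAPPER_DIR in sys.path:
    sys.path.remove(WRAPPER_DIR)
sys.path.insert(0, WRAPPER_DIR)
```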
Matt Clay 4c59c0b31d Fix docker unit test. Not deprecated in 2.1. 2016-06-11 09:14:54 -07:00
nitzmahone 467ef193fd call base _connect() from winrm._connect()
without it, we don't get the base's free become method error check

(cherry picked from commit 445a88d3e8)
2016-06-10 14:41:11 -07:00
Brian Coca 2982b99b8e better handling of retry files
(cherry picked from commit 734bbcb1d3)
2016-06-10 14:03:53 -04:00
nitzmahone 6b286ee0c8 fix for pseudo-connection hostvars not propagating to connection
(mostly done by jimi-c, tested working)

(cherry picked from commit 9f998dbfc49ab5b7a587db6a7099096a414f5841)
2016-06-10 10:17:56 -07:00
Brian Coca efed4e577c raw should not use default executable (#16085)
also removed unused cruft in script
(cherry picked from commit a529a60478)
2016-06-10 11:41:49 -04:00
J. Javier Maestro 9bb754c0a7 Fix: create retry_files_save_path if it doesn't exist (#15659)
* Fix: create retry_files_save_path if it doesn't exist

Ansible documentation states that retry_files_save_path directory will be
created if it does not already exist. It currently doesn't, so this patch
fixes it :)

* Use makedirs_safe to ensure thread-safe dir creation

@bcoca suggested to use the makedirs_safe helper function :)

(cherry picked from commit 828b73dd2d)
2016-06-10 09:41:30 -04:00
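Roughly what a race-safe directory creation helper does (a sketch, not the exact makedirs_safe implementation):

```python
import errno
import os

def makedirs_safe(path):
    # mkdir -p that tolerates another process creating the directory
    # between the check and the call.
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
```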
Toshio Kuratomi d65e2aec81 Update submodule ref 2016-06-09 15:52:23 -07:00
Brian Coca e971a63c33 avoid processing bad mtab entries (#16206)
fixes #16174
(cherry picked from commit c376954ecf)
2016-06-09 15:14:26 -04:00
Toshio Kuratomi 553ceefe81 Update submodule refs 2016-06-09 08:58:54 -07:00
James Cammarata c9b125d21a Fix hostvars lookup of localhost based on 5f1bbb4
(cherry picked from commit b51d0dd69b)
2016-06-08 14:15:02 -05:00
James Cammarata 00cf1cbac1 There can be only one localhost
The changes to exclude implicit localhosts from group patterns exposed
the bug that we sometimes create multiple implicit localhosts, which
caused some bugs with things like includes, where the host was used as
an entry into a dict, so having multiple meant that the incorrect host
(with a different uuid) was found and includes were not executed for
implicit localhosts.
2016-06-08 13:01:47 -05:00
James Cammarata 2e003adbc8 Expand return code values returned by TQM and strategies
This allows the PlaybookExecutor to receive more information regarding
what happened internal to the TaskQueueManager and strategy, to determine
things like whether or not the play iteration should stop.

Fixes #15523

(cherry picked from commit fbec2d9692)
2016-06-08 10:44:59 -05:00
Peter Sprygada d4c78b84f0 fixes issue with ssh keyfile and nxos authentication
The nxos cli provider would not properly handle ssh key files passed
from the playbook task.   The ssh_keyfile argument is now properly
passed to the ssh authentication method

This fix address the bug reported in #3862
2016-06-07 21:28:31 -04:00
nitzmahone 405f636cc7 bump submodule refs 2016-06-07 17:13:53 -07:00
James Cammarata a2c1247dff Revert "don't tempalte register"
This reverts commit 7ba790bbaf.

Fixes #15700
2016-06-07 13:40:36 -05:00
James Cammarata 3ced6d3e90 Further tweaks to variable precedence to make it match our docs
Also removes looking at role variables from the Block, as those are
merged in separately via VariableManager
2016-06-07 12:09:05 -05:00
James Cammarata a9c9cd773f Fix variable precedence issue where set facts beat role params
Also updates doc on variable precedence, as it was incorrect for the
order of play vars/vars_prompt/vars_files in relation to set_fact and
registered variables.

Fixes #14702
Fixes #14826
2016-06-07 11:25:26 -05:00
James Cammarata 313d94cc71 Create state in PlayIterator for unknown hosts rather than raise errors
Since we now use the PlayIterator to carry forward failures from previous
play executions, in the event that some hosts which had previously failed
are not in the current inventory we now create a stub state instead of
raising an error.
2016-06-07 09:45:22 -05:00
Krzysztof Magosa aa66551352 fix: playbook_on_task_start expects name and not task object (#16168)
(cherry picked from commit 721da46842)
2016-06-07 09:43:39 -04:00
Ritesh Khadgaray 66d54dec58 set remote_user to default if none is found when using delegate_to (#16138)
(cherry picked from commit ef9238ab85)
2016-06-07 09:37:14 -04:00
Peter Sprygada 6fc2660194 fixes issues with authenticating using ssh-agent for ios devices
An exception was raised when trying to use ssh-agent for authentication to
ios devices. This fix enables ssh-agent and enables the use of
password-protected ssh keys. There is one additional fix to capture
authentication exceptions nicely.
2016-06-07 06:33:35 -04:00
Matt Clay 8fd7e970a1 Remove opensuseleap from shippable tests in 2.1.
These tests were not part of CI at the time 2.1 was released.

They are not currently passing in stable-2.1, as changes necessary
for them to pass were made after the release.
2016-06-06 23:49:46 -07:00
Robin Roth 263e51095f use userdir module as example instead of alias (#15540)
* alias module is very basic and removing it leads to the suse default
  config failing
* future improvements might test different modules and the effect of
  them being removed
(cherry picked from commit cf62a62b83)
2016-06-06 18:54:26 -07:00
Rene Moser e1765c9d0d tests, postgresql: add ubuntu 16.04 support
(cherry picked from commit 5583027f99)
2016-06-06 18:08:36 -07:00
Rene Moser d14b29efc2 tests, apt_repository: disable Ubuntu 16.04 as there is no package yet
(cherry picked from commit 39e4caafb2)
2016-06-06 17:30:14 -07:00
Matt Clay 8b3ce600d0 Detect use of Travis tests on Shippable.
This can occur when building pre-Shippable branches or PRs.

(cherry picked from commit 03597143d0)
2016-06-06 16:48:17 -07:00
Robin Roth 945fb3411e Run tests on ubuntu1604 and opensuseleap (#15936)
* reduce async sleep time in test
* make zypper test less destructive (don't break following uses of zypper)
* fix ca cert on suse
* fix/enable postgres/mysql on opensuseleap
* fix mysql test for mysql versions 5.7.6 and newer
* skip sni_host check on ubuntu1604
* add HTTPTESTER flag for test_uri

ubuntu 16.04 uses dash which drops env variables containing a dot
we work around this by adding an explicit env variable to enable httptester

(cherry picked from commit c06884eff0)
2016-06-06 16:48:17 -07:00
Rene Moser e2ebae5522 tests: fix tests on Debian 8
(cherry picked from commit c20d1fced7)
2016-06-06 16:48:17 -07:00
Matt Clay 6f0bc4dd82 Corrected reference to httptester container.
(cherry picked from commit b755bcd875)
2016-06-06 16:36:54 -07:00
Matt Martz 247d4ebb8d Add httptester docker container files and update run_tests.sh to use ansible/httptester
(cherry picked from commit 164f247ec8)
2016-06-06 16:36:54 -07:00
Michael Scherer 86516eae05 Do not test vca and vmware.py for py2.4 (#15887)
Since both of them depend on libraries not
working on python 2.4, we shouldn't restrict
ourselves to 2.4, cf https://github.com/ansible/ansible/pull/15870
(cherry picked from commit cc61531a74)
2016-06-06 16:36:54 -07:00
Matt Martz df33ff6c65 Use httptester docker image for http tests (#15811)
* Use httptester docker image for http tests

* When not running with an httptester linked container, use public test sites

(cherry picked from commit accf40d8a8)
2016-06-06 16:36:54 -07:00
Brian Coca 73b250ecf7 fix default for removing images
(cherry picked from commit 2af8e3b9d8)
2016-06-06 16:36:54 -07:00
Brian Coca d77ff116c2 added ability to also subset make tests
(cherry picked from commit 27a1ae4732)
2016-06-06 16:36:54 -07:00
Brian Coca 6c80be47a2 another var 'defaulted' in run_tests
(cherry picked from commit 47d58c30e4)
2016-06-06 16:36:54 -07:00
Brian Coca 77dc6a36fd added defaults for 'optional' vars
(cherry picked from commit 3669ab2456)
2016-06-06 16:36:54 -07:00
Brian Coca 217f8fd824 fine tuned shell switches for run_tests.sh
(cherry picked from commit 52a714143f)
2016-06-06 16:36:54 -07:00
Matt Clay dee38ceb95 Update how shippable scripts are called.
(cherry picked from commit 6d74f43eff)
2016-06-06 15:55:42 -07:00
Matt Clay 66f8da7258 Add full support for Shippable CI.
(cherry picked from commit b0e1efbd62)
2016-06-06 15:55:42 -07:00
Michael Schmid c1fe7aa322 Fixes #15915: New HandlerTaskInclude Class which can run TaskIncludes inside Handlers 2016-06-06 16:02:27 -05:00
jctanner 4cabc94f09 Force relative role paths to be absolute and correct errors about missing path(s). (#16088)
Addresses #10811
2016-06-06 15:46:34 -05:00
James Cammarata 7725c58315 Make sure we add host/group vars files when parsing inventory
Also fixes a bug where add_host was not adding the vars files

Fixes #16117
2016-06-06 15:45:55 -05:00
James Cammarata 9936d7355c Fix recursive call of _load_inventory_file() in VariableManager
Fixes #16128
2016-06-06 14:57:59 -05:00
James Cammarata 0ba9a6a875 Fix the way host and group vars are loaded
* In the VariableManager, we were not properly tracking if a file
  had already been loaded, so we continuously append data to the end
  of the list there for host and group vars, meaning large sets of data
  are duplicated multiple times
* In the inventory, we were merging the host/group vars with the vars
  local to the host needlessly, as the VariableManager already handles that.
  This leads to needless duplication of the data and makes combining the
  vars in VariableManager take even longer.
2016-06-06 14:57:59 -05:00
Tobias Wolf 7287effb5c Solve performance issue with a large number of groups (#13957)
Ansible excessively checks the file system for the potential presence of
`group_vars` and `host_vars` files.

For large numbers of groups this leads to combinatorial performance
issues.

This commit generates a set of group_vars and host_vars filenames using
`os.listdir()` in every possible location and then checks against the sets
before making a stat of the file system.

Also included in this commit is caching of the base directory lookup
for the inventory.
2016-06-06 14:57:59 -05:00
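A hedged sketch of the optimization this commit describes: list each vars directory once, then test set membership instead of stat()ing every candidate file. Function and path names here are illustrative, not Ansible's actual API:

    import os

    def existing_vars_files(basedir):
        """One listdir() per directory instead of one stat() per candidate."""
        found = set()
        for subdir in ('group_vars', 'host_vars'):
            d = os.path.join(basedir, subdir)
            if os.path.isdir(d):
                for name in os.listdir(d):
                    found.add(os.path.join(subdir, os.path.splitext(name)[0]))
        return found

    # cache = existing_vars_files('/etc/ansible')
    # load 'group_vars/webservers' only if that key is present in the cache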
André Cruz b950f75489 Use the python executable specified by the environment.
(cherry picked from commit 18831c72f9)
2016-06-05 20:09:05 -04:00
André Cruz 535a436703 Fix whitespace.
(cherry picked from commit 1fdb7613f1)
2016-06-05 20:09:05 -04:00
James Cammarata 41dde7259b Mark implicit hosts as such and exclude them from the all group
Fixes #16059
2016-06-04 18:54:50 -05:00
nitzmahone 31f6e26009 bump extras submodule ref for azure fixes 2016-06-04 16:18:01 -07:00
nitzmahone 6e6dd98b86 update azure minimum doc fragment 2016-06-04 16:06:29 -07:00
nitzmahone a538b222dc updates for Azure SDK 2.0.0rc4 2016-06-04 15:08:52 -07:00
James Cammarata 55d40b50e4 Removing __version__ from __init__.py since it belongs in release.py 2016-06-04 11:00:08 -05:00
Toshio Kuratomi 6f5d1456bc Fix the copy action to not send the content as a param. (#16127)
Has already been transferred as a tempfile.

This fixes the error in https://github.com/ansible/ansible/issues/16125
but there may be higher level issues that should be fixed as well (other
modules might be able to cause status fields like failed and changed to
return a censored string instead of a bool).  So leaving 16125 open for
now.
2016-06-03 12:34:20 -07:00
nitzmahone 7ddf6dbbd4 bump core submodule ref 2016-06-03 10:48:30 -07:00
nitzmahone 9f32dcdd28 bump core submodule ref 2016-06-03 10:22:38 -07:00
Bede Carroll 1b5ec51988 Add validate_certs to vmware doc fragment (#16096) 2016-06-02 10:22:40 -04:00
Brian Coca 71350e022a fixed confusing reporting when no hosts are matched
it was hitting the 'empty inventory' warning.
2016-06-01 08:35:30 -04:00
Brian Coca ff9641a6a1 fix version/help when missing action
also fixed issues with galaxy cli options
2016-06-01 08:35:30 -04:00
Brian Coca a59478f4f4 treat roles_path as the list it is for galaxy list 2016-06-01 08:35:30 -04:00
Brian Coca 2452bd2135 moved validate info from vvv to debug 2016-05-27 09:44:42 -04:00
Chris Houseknecht 0de111be00 Fix missing ignore_certs (#16018) 2016-05-27 09:43:41 -04:00
Toshio Kuratomi 2399dd32a8 Update submodule refs -- get them back onto the stable-2.1 branch instead of devel 2016-05-26 16:29:41 -07:00
Toshio Kuratomi ccbcb4b5e9 Update grep for six to not falsely trigger when six is only a substring of a different library 2016-05-26 16:16:21 -07:00
Toshio Kuratomi 4304574c00 Fix unarchive failures when the destination is a symlink to a directory (#15999)
Also add integration tests for this problem and unicode filenames inside
a tarball.

Fixes #3725
2016-05-26 15:02:10 -07:00
James Cammarata a44743d142 Use get_dep_chain() instead of directly using a blocks _dep_chain
Child blocks (whether nested or via includes) don't get a copy of the
dependency chain, so the above method should be used to ensure the block
looks at its parent's dep chain.

Fixes #15996
2016-05-26 15:48:53 -04:00
nitzmahone 4bf9cf6e41 mark azure_rm_X_facts modules as TECH PREVIEW 2016-05-25 16:02:39 -07:00
nitzmahone f3c9672fc0 add missing azure_rm modules to changelog 2016-05-25 14:25:03 -07:00
Toshio Kuratomi 52c9e2ffcf Add strings 'True' and 'False' as booleans, since python bools converted to strings will look that way.
Workaround for custom modules which are using choices=BOOLEANS instead
of type='bool'.
2016-05-25 10:38:35 -07:00
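The failure mode in miniature; the lists below approximate Ansible's BOOLEANS constants rather than quoting them:

    BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 'True', True]
    BOOLEANS_FALSE = ['no', 'off', '0', 'false', 'False', False]

    def to_bool(value):
        if value in BOOLEANS_TRUE:
            return True
        if value in BOOLEANS_FALSE:
            return False
        raise ValueError('not a boolean: %r' % value)

    print(to_bool(str(True)))  # str(True) == 'True', the case this commit adds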
Adrian Likins b3ca832025 Add ChangeLog for pkg_util bug fix. 2016-05-25 12:27:26 -04:00
James Cammarata 09fa05373b When adding nested blocks, don't pass in any parent includes
We previously changed block behavior to prefer includes over parent
blocks, which broke inheritence in nested blocks.

Fixes #15926
2016-05-25 06:59:37 -07:00
Toshio Kuratomi a6bff1e49c Add Changelog for optional pycrypto. 2016-05-25 06:59:37 -07:00
Peter Oliver faf85ec57c Catch DistributionNotFound when pycrypto is absent (#15731)
* Catch DistributionNotFound when pycrypto is absent

On Solaris 11, module `pkg_resources` throws `DistributionNotFound` on import if `cryptography` is installed but `pycrypto` is not.  This change causes that situation to be handled gracefully.

I'm not using Paramiko or Vault, so my understanding is that I don't
need `pycrypto`. I could install `pycrypto` to make the error go away, but:
- The latest released version of `pycrypto` doesn't build cleanly on Solaris (https://github.com/dlitz/pycrypto/issues/184).
- Solaris includes an old version of GMP that triggers warnings every time Ansible runs (https://github.com/ansible/ansible/issues/6941).  I notice that I can silence these warnings with `system_warnings` in `ansible.cfg`, but not installing `pycrypto` seems like a safer solution.

* Ignore only `pkg_resources.DistributionNotFound`, not other exceptions.
2016-05-25 06:59:37 -07:00
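A minimal sketch of the graceful-degradation pattern described here; the import sites are illustrative, not Ansible's exact code:

    try:
        from pkg_resources import DistributionNotFound
    except ImportError:
        class DistributionNotFound(Exception):
            pass

    try:
        from Crypto.Cipher import AES  # pycrypto
        HAS_PYCRYPTO = True
    except (ImportError, DistributionNotFound):
        # On Solaris 11, pkg_resources can raise DistributionNotFound when
        # cryptography is installed but pycrypto is not; degrade gracefully.
        HAS_PYCRYPTO = False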
James Cammarata d3367dd722 Updating RELEASES.txt 2016-05-25 08:52:51 -04:00
Toshio Kuratomi 1867adfabc Update submodule ref to include parameter change to junos module. 2016-05-24 11:48:32 -07:00
James Cammarata 9e622dcf31 New release v2.1.0.0-0.4.rc4 2016-05-22 16:44:41 -04:00
Matt Clay 68807c461b Add support for shippable.com CI tests.
This will allow offloading sanity tests from Travis to Shippable.

(cherry picked from commit 0e8930640a)
2016-05-21 16:22:05 -07:00
Toshio Kuratomi 3bef107872 Fix the error handling for loop squashing to restore the name argument into the task args (#15949) 2016-05-21 07:34:36 -07:00
Toshio Kuratomi 6b964a1594 Update test off make_become for ssh pty race mitigation (#15931) 2016-05-19 15:20:32 -07:00
Toshio Kuratomi 36f2312071 Update core submodule to pull in docker documentation fix 2016-05-19 14:42:33 -07:00
Matt Clay b970e2ca80 Add work-around for ssh pty race condition.
This should minimize loss of stdout when using
a pty and connecting with ssh or paramiko_ssh.

(cherry picked from commit bad293ae35)
2016-05-19 10:34:09 -07:00
James Cammarata 036547b4dd New release v2.1.0.0-0.3.rc3 2016-05-18 15:48:24 -04:00
chouseknecht eae26891ea Fix typo.
(cherry picked from commit 0bc23a4408)
2016-05-18 15:12:42 -04:00
chouseknecht b5ccbf2986 Update ansible-container links.
(cherry picked from commit 22de8a9a21)
2016-05-18 15:12:25 -04:00
chouseknecht 7bfc3df933 Fix italics and version directives.
(cherry picked from commit 0c80b71d71)
2016-05-18 15:12:09 -04:00
chouseknecht 1aa39ae78b Add docker_service and mention ansible-container.
(cherry picked from commit 10c5d9e749)
2016-05-18 15:09:14 -04:00
James Cammarata a6150dbc14 More playbook executor compatibility fixes
With some earlier changes, continuing to forward failed hosts on
to the iterator with each TQM run() call was causing plays with
max_fail_pct set to fail, as hosts which failed in previous plays
were counting those old failures against the % calculation.

Also changed the linear strategy's calculation to use the internal
failed list, rather than the iterator, as this now represents the
hosts failed during the current run only.
2016-05-18 14:09:04 -04:00
Toshio Kuratomi 92e4f25066 Make load_params into a function that custom modules can access (#15913) 2016-05-18 10:55:46 -07:00
Dag Wieers b3676392a8 Fix small typo in ansible.cfg (#15912) 2016-05-18 12:29:30 -04:00
machilde 41f45336a9 Fixed issue with parents having depth of 3+ on add method of NetworkConfig 2016-05-18 09:31:54 -04:00
James Cammarata 710a96956e In TQM run() mark any entries in _failed_hosts as failed in the PlayIterator
As noted in the comment, the TQM may be used for more than one play. As such,
after creating the new PlayIterator object it is necessary to mark any failed
hosts from previous calls to run() as failed in the iterator, so they are
properly skipped during any future calls to run().
2016-05-18 08:17:53 -04:00
James Cammarata 7855612805 Re-remove checking for failed state on hosts when building list of hosts
This was re-added by 63471cd (and modified by me to use iterator again),
it simply needs to be removed.

Fixes #15395
2016-05-18 08:17:53 -04:00
Vincent Roy b0259a8631 Make sure that serial failures are handled correctly. 2016-05-18 08:17:53 -04:00
Vincent Roy 5e9b13cb94 Don't let max_fail_percentage affect future plays. 2016-05-18 08:17:53 -04:00
Vincent Roy 46e9e4c4da Handle max_fail_percentage per task. 2016-05-18 08:17:53 -04:00
Vincent Roy 9602e43952 Don't stop executing plays after failure.
https://github.com/ansible/ansible/pull/13750/files
2016-05-18 08:17:53 -04:00
Vincent Roy c901c9144c Backward compatibility for execution failures with 1.9 2016-05-18 08:17:53 -04:00
Mike Bryant 76cd7cadfe Allow ssh agent usage for junos_netconf
By default the `Shell` class disables ssh agents. The `junos_netconf`
module uses this class, but doesn't re-enable agents.
Here it's explicitly enabled again, so an ssh agent can be used to
connect to and configure Junos devices.
2016-05-18 07:55:31 -04:00
Kei Nohguchi 94e4e4105d openswitch.py: Fix the OpenSwitch REST authentication
It's cookie-based authentication: we obtain the cookie through the
/login endpoint, called by the connect() method, and save it for
the rest of the calls.
2016-05-18 07:41:04 -04:00
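The flow described above, sketched with the requests library as an assumption (the module uses its own HTTP layer; URL and credentials are placeholders):

    import requests

    session = requests.Session()
    # connect(): POST to /login; the session stores the returned auth cookie
    session.post('https://switch.example.com/login',
                 data={'username': 'admin', 'password': 'secret'})
    # later REST calls ride on the saved cookie automatically
    resp = session.get('https://switch.example.com/rest/v1/system')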
Peter Sprygada 309aba128c handle name resolution errors more gracefully from shell.py
This change will catch socket.gaierror exceptions from shell.py and
return a more friendly message to the user
2016-05-18 06:55:17 -04:00
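A sketch of the friendlier handling described here; the function name and message are hypothetical:

    import socket

    def open_shell_connection(host, port=22):
        try:
            return socket.create_connection((host, port), timeout=10)
        except socket.gaierror:
            raise RuntimeError('unable to resolve host %r; check the name and DNS'
                               % host)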
nitzmahone ff346a199c fix windows integration tests to run under kerberos users 2016-05-17 15:32:32 -07:00
Peter Sprygada 2259b90827 bugfix for issue with trying to eval contains with non-string
fixes ansible/ansible-modules-core#3502
2016-05-17 06:46:03 -04:00
Brian Coca cd2991c02d made format more flexible and allow for non dict entries 2016-05-16 14:40:11 -04:00
Peter Sprygada d9fa5a3b80 fixes #15496
changed to using OrderedDict to preserve order of lines
2016-05-16 14:05:09 -04:00
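Why OrderedDict matters here, sketched: de-duplicating config lines through a plain pre-3.7 dict loses their order, while OrderedDict keeps first-seen order:

    from collections import OrderedDict

    lines = OrderedDict()
    for line in ('interface eth0', 'ip address 10.0.0.1/24', 'interface eth0'):
        lines.setdefault(line, None)  # de-duplicate, preserve first-seen order
    print(list(lines))  # ['interface eth0', 'ip address 10.0.0.1/24']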
Michael Scherer e0112a3db3 Fix pkgin detection on NetBSD 6 and 7 (#15834)
Since this is now the default package manager, it got moved
to another location on NetBSD:

  netbsd# type pkgin
  pkgin is a tracked alias for /usr/pkg/bin/pkgin
  netbsd# uname -a
  NetBSD netbsd.example.org 6.1.4 NetBSD 6.1.4 (GENERIC) amd64

But since the package manager is also used outside of NetBSD, we
have to keep the /opt/local path too.
2016-05-16 10:14:46 -04:00
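The two-location probe this commit describes, as a hedged sketch:

    import os

    def find_pkgin():
        # NetBSD 6/7 ship pkgin under /usr/pkg; other pkgsrc platforms keep
        # the traditional /opt/local prefix.
        for candidate in ('/usr/pkg/bin/pkgin', '/opt/local/bin/pkgin'):
            if os.path.exists(candidate):
                return candidate
        return None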
Nathaniel Case 8f4243c3ec Check for jxmlease when using netconf on JUNOS. (#15835) 2016-05-16 10:10:26 -04:00
Kei Nohguchi 956829f0f0 net_template.py: Fix jinja2 template file search path (#15134)
The change is needed to support multiple include statements
inside a jinja2 template file, as in the '{% include ['another.j2'] %}'
statement. I need this capability, as the OpenSwitch `switch` role needs
to handle multiple *.j2 files, and supporting the include statement
inside a jinja2 file is essential; otherwise I would need to combine
multiple template files into a single file, which easily causes conflicts
between developers working on different parts of the template, ports
and interfaces.
2016-05-16 10:10:04 -04:00
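The underlying jinja2 mechanism, as a minimal sketch: '{% include %}' resolves through the environment loader's search path, so that path must contain the template's directory:

    import os
    from jinja2 import Environment, FileSystemLoader

    def render(template_path, context):
        searchpath = os.path.dirname(os.path.abspath(template_path))
        env = Environment(loader=FileSystemLoader(searchpath))
        return env.get_template(os.path.basename(template_path)).render(context)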
camradal af5195e336 vCloud module utils error handling bug fixes (#15859)
* Fix AttributeError that hides login errors

* Typo fixes for vca error messages
2016-05-13 23:59:47 -07:00
Robin Roth 03e7f54071 Don't use 'from ansible.module_utils import foo' style here as it breaks (#15756)
py.test"
2016-05-13 23:59:39 -07:00
James Cammarata 0ad0b3d83b New release v2.1.0.0-0.2.rc2 2016-05-13 17:07:03 -04:00
James Cammarata b40948acd9 Adding accelerated mode deprecation to the CHANGELOG 2016-05-13 17:00:36 -04:00
James Cammarata cc347dcaa9 Adding a deprecation message for accelerated mode 2016-05-13 17:00:06 -04:00
James Cammarata 4c5dddb25a Reworking retry/until logic to fix bugs
Prior to this patch, the retry/until logic would fail any task that
succeeded if it took all of the allotted retries to succeed. This patch
reworks the retry/until logic to make things simpler and clearer.

Fixes #15697
2016-05-13 16:09:17 -04:00
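A simplified model of the corrected semantics; Ansible's attempt accounting differs in detail, this only illustrates that success on the final allowed attempt is still success:

    import time

    def run_until(task, retries=3, delay=5):
        for attempt in range(1, retries + 1):
            result = task()
            if result.get('until_met'):
                return result          # success, even when attempt == retries
            if attempt < retries:
                time.sleep(delay)
        result['failed'] = True        # fail only after exhausting all attempts
        return result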
jctanner b181fe0bc2 Disable sftp batch mode if sshpass (#15829)
Make use of the -oBatchMode=no option to force password prompts from sftp

Addresses #13401
2016-05-13 13:52:30 -04:00
Toshio Kuratomi 71c7476199 Update submodule refs 2016-05-13 10:15:52 -07:00
Matt Martz 9121ca2f8e Use .code instead of .getcode() as py24 does not have .getcode(). Fixes https://github.com/ansible/ansible-modules-core/issues/3608 2016-05-13 12:11:15 -05:00
Toshio Kuratomi cae6240e5e Ship constants to the modules via internal module params rather than a secondary dict. 2016-05-13 10:09:28 -07:00
James Cammarata 2f302e26f4 Make sure setting facts with run_once makes copies of the data
When using run_once, there is only one dict of facts so passing that
to the VariableManager results in the fact cache containing the same
dictionary reference for all hosts in inventory. This patch fixes that
by making sure we pass a copy of the facts dict to VariableManager.

Fixes #14279
2016-05-13 10:05:50 -04:00
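The shared-reference bug in miniature:

    facts = {'foo': 1}
    hosts = ['h1', 'h2']

    shared = dict((h, facts) for h in hosts)        # buggy: one dict for all hosts
    copied = dict((h, dict(facts)) for h in hosts)  # fixed: per-host copy

    shared['h1']['foo'] = 2
    copied['h1']['foo'] = 2
    print(shared['h2']['foo'])  # 2, the change leaked to every host
    print(copied['h2']['foo'])  # 1, isolated per host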
Matt Martz 1968bc5952 Cascade ssh_*args configurations in synchronize instead of limiting to just ssh_args. See https://github.com/ansible/ansible-modules-core/issues/3370 2016-05-12 19:01:09 -05:00
Matt Martz 047d62cce3 Guard against a shell profile printing extraneous data 2016-05-12 18:33:54 -05:00
James Cammarata 1c2f0ae8f7 Simply being in an always block shouldn't mean a host did not fail
Previously the changed code was necessary, however it is now problematic
as we've started using the is_failed() method in other places in the code.
Additional changes at the strategy layer should make this safe to remove
now.

Fixes #15625
2016-05-12 17:12:07 -04:00
James Cammarata 3a052654f3 Do not include params when getting role vars in certain situations
In VariableManager, we fetch the params specifically in the next step,
so including them in the prior step is unnecessary and could lead to things
being overridden in an improper order.

In Block, we should not be getting the params for the role as they are
included earlier via the VariableManager.

Fixes #14411
2016-05-12 17:12:00 -04:00
Carl 2a512affde Fixes #15745 playbook include: Conditional scoping
Fixes #15745
Applies conditional forwarding to all tasks/roles within the included playbook.
The existing line only applies forwarded conditionals to the main Task block, and misses pre_, post_, and roles.

Typo: made a selection mistake when I copied over the one-line change.
2016-05-12 15:02:46 -04:00
Toshio Kuratomi 90fb1fb3fa If we can't squash for any reason, then simply do not optimize the items loop.
Also add more squashing testcases

Fixes #15649
2016-05-12 11:11:05 -07:00
Toshio Kuratomi 8cd0c432e7 Fixed importing the libcloud modules to give a nice error rather than a traceback. 2016-05-12 09:14:51 -07:00
Vic Iglesias 151df71464 Update GCE module to use JSON credentials (#13623)
* Update GCE module to use JSON credentials

* Ensure minimum libcloud version when using JSON credentials for GCE

* Relax language around libcloud requirements
2016-05-12 09:14:51 -07:00
James Cammarata d22898a27c Clear blocked hosts when a role duplicate task is found in free strategy
In the free strategy, we mark a host as blocked when it has work to do
(the PlayIterator returns a task) to prevent multiple tasks from being sent
to the host. However, we check for role duplicates after setting the blocked
flag, but were not clearing that when the task was skipped leading to an
infinite loop. This patch corrects that by clearing the blocked flag when
the task is skipped.

Fixes #15681
2016-05-12 10:42:51 -04:00
Lars Kellogg-Stedman 22b86bbb63 do not erroneously set gathered_facts=True
In `lib/ansible/executor/play_iterator.py`, ansible sets a host's
`_gathered_facts` property to `True` without checking to see if there
are any tasks to be executed.  In the event that the entire play is
skipped, `_gathered_facts` will be `True` even though the `setup`
module was never run.

This patch modifies the logic to only set `_gathered_facts` to `True`
when there are tasks to execute.

Closes #15744.
2016-05-12 10:23:06 -04:00
Andrew Taumoefolau 00b04ab794 Apply inventory host restrictions by host name rather than UUID.
Issue #15633 observes that a meta: inventory_refresh task causes the playbook
to exit. An inventory refresh flushes all caches and rebuilds all host
objects, assigning new UUIDs to each. These new host UUIDs currently fail to
match those on host objects stored for restrictions in the inventory, causing
the playbook to exit for having no hosts to run further tasks against.

This changeset attempts to address this issue by storing host restrictions
by name, and comparing inventory host names against these names when applying
restrictions in get_hosts.
2016-05-12 09:52:36 -04:00
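A sketch of the name-based matching this changeset describes; class and method names are simplified stand-ins. Names survive an inventory refresh, per-object UUIDs do not:

    class Inventory(object):
        def __init__(self, hosts):
            self._hosts = hosts          # host objects, rebuilt on refresh
            self._restriction = None     # host *names*, stable across refresh

        def restrict_to_hosts(self, hosts):
            self._restriction = set(h.name for h in hosts)

        def get_hosts(self):
            if self._restriction is None:
                return self._hosts
            return [h for h in self._hosts if h.name in self._restriction]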
James Cammarata af257c20da Change error about loop variable in use to a warning 2016-05-11 21:50:51 -04:00
Toshio Kuratomi 0040c6a4d2 Add fix for 15601 to the ChangeLog 2016-05-11 17:56:48 -07:00
Toshio Kuratomi 8a84ef80e2 Strip junk after JSON return. (#15822)
Fixes #15601
2016-05-11 17:55:20 -07:00
Toshio Kuratomi e9406bcfd3 Backport some python3 fixes for facts 2016-05-10 21:47:19 -07:00
Brian Coca 1c21baa706 restore old jsonfile behaviour on key expiration
fixes #14456; now it won't expire keys in the middle of a play when they
were 'valid' at 'gather time'.
2016-05-10 15:55:46 -04:00
Brian Coca 8a72972360 fix bad assignment, method modifies by ref already
fixes #15694
2016-05-10 12:08:22 -04:00
Toshio Kuratomi b7cab0533e Remove reload from arg related tests. Changes to how ziploader passes args mean we don't need reload anymore. (#15782) 2016-05-10 08:14:46 -07:00
Toshio Kuratomi 6e8b12690b Add s3_bucket fix 2016-05-10 08:14:46 -07:00
Toshio Kuratomi e840816da2 Add s3_bucket fix to CHANGELOG 2016-05-10 08:14:46 -07:00
Matthew Stoltenberg fc4edcbedc add repr for hostvars (#15793)
* allows passing full hostvars to a module
2016-05-10 10:24:06 -04:00
Toshio Kuratomi 31f17d1191 Update submodule ref 2016-05-10 07:12:56 -07:00
Toshio Kuratomi d2e8e8d6a7 Switch to a different url for testing SNI right now. (#15798) 2016-05-10 07:10:28 -07:00
Kamjar Gerami 6f99f40f37 fixes-#15685-tools-that-paginate-show-spurious-less-output: less --version outputs to standard out, not to standard error, so this changes the redirect from 2> to > (#15720)
Also updated the redirect to include stderr as well as stdout so no errors are shown on screen
2016-05-09 16:50:25 -04:00
Toshio Kuratomi be28443943 Strip leading and trailing whitespace for json arg types 2016-05-09 08:15:23 -07:00
nitzmahone 9620eadc03 bump extras submodule ref 2016-05-06 09:48:25 -07:00
Toshio Kuratomi 73dd183394 Update submodule refs 2016-05-06 08:15:05 -07:00
James Cammarata 506e6255b2 Fixing bugs in strategies
* Don't filter hosts remaining based on their failed state. Instead rely
  on the PlayIterator to return None/ITERATING_COMPLETE when the host is
  failed.
* In the free strategy, make sure we wait outside the host loop for all
  pending results to be processed.
* Use the internal _set_failed_state() instead of manually setting things
  when a failed child state is hit

Fixes #15623
2016-05-06 09:21:20 -04:00
nitzmahone 4574a0e82d bump submodule refs 2016-05-05 15:45:39 -07:00
nitzmahone 761cdc794e add jimi-c's unit test for squashed skip results, tweaked is_skipped() logic to pass 2016-05-05 15:30:58 -07:00
Andrew Taumoefolau da1e62a6d9 Don't assume a task with non-dict loop results has been skipped.
This changeset addresses the issue reported here:

  ansible/ansible-modules-core#1765

The yum module (at least) includes its task results as strings, rather than
dicts, and the code this changeset replaces assumed that in that instance the
task was skipped. The updated behaviour assumes that the task has been
skipped only if:

* results exist, and
* all results are dicts that include a truthy skipped value
2016-05-05 15:30:50 -07:00
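The updated test as a standalone sketch:

    def loop_was_skipped(results):
        """True only if every per-item result is a dict with truthy 'skipped'."""
        return bool(results) and all(
            isinstance(r, dict) and r.get('skipped') for r in results
        )

    print(loop_was_skipped([{'skipped': True}, {'skipped': True}]))  # True
    print(loop_was_skipped(['yum: nothing to do']))  # False: a string, not a skip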
Matt Hite 198f57b61e New inventory_ip_type option in gce inventory tool 2016-05-05 10:22:46 -07:00
Brian Coca 90fef85143 check that variable first
before using string methods to check for magic interpreter var
2016-05-05 11:15:36 -04:00
camradal a355ebcbb8 Fix logging into vCloud Director and expose verify_certs argument (#15533) 2016-05-05 09:52:27 -04:00
nitzmahone 2c8715178f error message cleanup 2016-05-04 09:44:36 -07:00
Tobias Wolf 7662b06d5b actionable.py: Do not print next task banner in handler callback (#15698)
Fix actionable callback plugin to not print the banner of the previous
task.

When a handler is executed there is no task banner, so in case it is run,
it will reference the banner from the preceding task.

**Author:** @hvhaugwitz

Test case:

      ---

      - name: actionable filter
        hosts: all
        handlers:
          - name: handler
            command: "true"
        tasks:
          - name: task 1
            file: path=/tmp/test state=touch
            notify: handler
          - name: task 2
            file: path=/tmp/test state=absent
          - name: task 3
            file: path=/tmp/test state=absent
          - name: task 4
            file: path=/tmp/test state=absent
          - name: task 5
            file: path=/tmp/test state=absent
          - name: task 6
            file: path=/tmp/test state=absent

Example output:

BEFORE
------

      PLAY [actionable filter] *******************************************************

      TASK [task 1] ******************************************************************
      changed: [localhost]

      TASK [task 2] ******************************************************************
      changed: [localhost]

      RUNNING HANDLER [handler] ******************************************************

      TASK [task 6] ******************************************************************
      changed: [localhost]

      PLAY RECAP *********************************************************************
      localhost                  : ok=8    changed=3    unreachable=0    failed=0

AFTER
-----

      PLAY [actionable filter] *******************************************************

      TASK [task 1] ******************************************************************
      changed: [localhost]

      TASK [task 2] ******************************************************************
      changed: [localhost]

      RUNNING HANDLER [handler] ******************************************************
      changed: [localhost]

      PLAY RECAP *********************************************************************
      localhost                  : ok=8    changed=3    unreachable=0    failed=0
2016-05-03 11:26:37 -04:00
Toshio Kuratomi e52caba801 Update submodule ref to fix ansible-doc for azure modules 2016-05-03 07:53:26 -07:00
Toshio Kuratomi efbcd8bda0 Add a jsonarg type to arg spec (#15701)
This makes sure that if we get a list or dict that it is turned into
a jsonified string.
2016-05-03 10:21:23 -04:00
Martin Matuska 86768fdcc8 Treat "static: yes/no" with higher priority than "task_includes_static" in ansible.cfg 2016-05-03 09:19:05 -04:00
Martin Matuska e1b7d4cb4d Restore Ansible 2.0 compatibility for includes 2016-05-03 09:18:59 -04:00
Toshio Kuratomi 641b4cc952 Add unarchive fixes to changelog 2016-05-02 11:32:19 -07:00
Toshio Kuratomi 7cc7684abd Update submodule refs 2016-05-02 11:31:20 -07:00
jctanner 16c1f10e18 Remove the ziploader provided pythonpaths from the env inside run_com… (#15674)
Remove the ziploader provided pythonpaths from the env inside run_command.

Fixes #15655
2016-05-02 11:24:31 -04:00
Brian Coca 4abadead76 make vi the default editor if no EDITOR
fixes #15577
2016-05-02 08:45:01 -04:00
James Cammarata c7cae3b08c Don't fail hosts when using any_errors_fatal when ignoring errors
Fixes #15431
2016-05-01 12:40:49 -04:00
Yannig 40c0f34c41 When var name is the same as var content, try to template it before reporting that var is not defined. (#13629)
* When var name is the same as var content, try to template it before reporting that var is not defined.
Add asserts in test_var_blending to check this special corner case.

* Fix integration tests when using debug with list or dict.
2016-05-01 06:48:54 -07:00
Toshio Kuratomi 48fa4f842e Get rid of logentries.to_unicode.
It wasn't doing anything that a literal couldn't do and used
unicode_escape which only understands latin1 (The author of the code
seems to have thought it took an encoding argument but it looks like
that was silently ignored.)
2016-04-30 07:49:51 -07:00
Toshio Kuratomi 145d8626d0 Fix inventory on python3 2016-04-30 07:29:45 -07:00
Toshio Kuratomi e27b22162a bytes when passing to os.path.* and opening the file, text at other times
Fixes #15644
2016-04-29 22:24:47 -07:00
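The rule of thumb from this fix, sketched; hard-coding UTF-8 is an assumption for the example, the real code derives the encoding from the filesystem:

    import os

    def file_exists(path_text):
        path_bytes = path_text.encode('utf-8')  # bytes for os.path.* and open()
        return os.path.exists(path_bytes)

    name = u'caf\xe9.txt'  # text at other times: display, comparison, templating
    print(name, file_exists(name))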
Toshio Kuratomi 92c299cfd5 Update submodule refs 2016-04-29 22:00:33 -07:00
Toshio Kuratomi 5909a4473d Fix the mapping of module_name to Locks
This was reinitialized every time we forked before so we weren't sharing
the same Locks.  It also was not accounting for modules which were
directly invoked by an action plugin instead of going through the
strategy plugins.
2016-04-29 21:58:14 -07:00
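A sketch of the sharing requirement described here, with illustrative names: the module_name-to-Lock map must be populated in the parent before any fork, so workers inherit the same Lock objects:

    import multiprocessing

    KNOWN_MODULES = ('ping', 'copy', 'command')

    # Built once in the parent. Creating locks lazily after fork() would give
    # each worker a private Lock, which is the bug this commit fixes.
    action_write_locks = dict((name, multiprocessing.Lock())
                              for name in KNOWN_MODULES)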
Toshio Kuratomi d5585220a4 Fix the version number 2016-04-29 14:01:14 -07:00
Toshio Kuratomi 1e5708514b Fix ziploader for the cornercase of ansible invoking ansible.
* Make ziploader's ansible and ansible.module_utils libraries into
  namespace packages.
* Move __version__ and __author__ from ansible/__init__ to
  ansible/release.py.  This is because namespace packages only load one
  __init__.py.  If that is not the __init__.py with the author and
  version info then those won't be available.
* In ziploader, move the version into ANSIBLE_CONSTANTS.
* Change PluginLoader to properly construct the path to the plugins even
  when namespace packages are present.
2016-04-29 11:15:24 -07:00
Brian Coca a582664bc6 fix typo 2016-04-29 13:50:52 -04:00
Brian Coca 08ce27edfb fixed method signature as per #15668 2016-04-29 13:13:36 -04:00
chouseknecht bea94efd73 Add aliases 2016-04-29 01:29:04 -04:00
chouseknecht 92f058b1f5 Updated doc fragment to be consistent with the getting started guide. 2016-04-29 01:28:53 -04:00
chouseknecht 7ed6270808 Fixed docker_host definition. 2016-04-29 01:26:13 -04:00
chouseknecht 6db3d4887e Change command line parameters to parameters. 2016-04-29 01:25:28 -04:00
chouseknecht 5e7e55830f Adding getting started guide for Docker 2016-04-29 01:25:06 -04:00
chouseknecht 8f31634977 Update submodules 2016-04-29 01:15:26 -04:00
chouseknecht 688a7d3668 Updating submodules 2016-04-29 01:05:11 -04:00
chouseknecht 8bef5ccae9 Updating submodules 2016-04-29 00:59:41 -04:00
Brian Coca 74b4d7a2ae API now connects to server lazily (#15632)
This should fix most issues with offline operation.

Fixes #14486
Fixes #13991

Alternate to #15363 and #15593
2016-04-28 23:28:49 -04:00
Dag Wieers 5a9e826647 Rename function bool() as it is a Python built-in function (#15651) 2016-04-28 14:42:21 -04:00
Toshio Kuratomi 3c42724d96 Update extras submodules 2016-04-28 08:31:11 -07:00
Toshio Kuratomi 069e032dcd Add git_config module to 2.1 changelog 2016-04-28 08:30:48 -07:00
Toshio Kuratomi 9535feb90a Update submodule ref 2016-04-28 08:15:29 -07:00
Chris Houseknecht 0b28f708c4 Update and fix typos in docker inventory docstrings. (#15616) 2016-04-28 07:27:06 -07:00
Toshio Kuratomi 10c3472699 Handle "/usr/bin/env python" style ansible_*interpreter settings.
Fixes #15635
2016-04-28 06:52:39 -07:00
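The parsing change sketched: treat the interpreter setting as an argv list instead of a single executable path:

    import shlex

    def interpreter_argv(setting):
        return shlex.split(setting)

    print(interpreter_argv('/usr/bin/env python'))  # ['/usr/bin/env', 'python']
    print(interpreter_argv('/usr/bin/python'))      # ['/usr/bin/python']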
Brian Coca b6c59f89d2 added common azure tags fragment 2016-04-27 17:10:37 -04:00
Brett Inman 8222a83bcd Make -q flag totally quiet for env-setup.fish
The first echo and setup.py steps are not quiet and result in output that is annoying if your Fish is configured to source the file.
2016-04-27 12:33:45 -07:00
Toshio Kuratomi 430bef907a Fix for unittests on python2.6 or less 2016-04-27 10:46:35 -07:00
Toshio Kuratomi d3de771d2e Update submodule ref 2016-04-27 10:42:25 -07:00
Toshio Kuratomi 459c4ec124 Remove the duplicate modstyle parameter 2016-04-27 10:38:41 -07:00
Robin Roth 1c447016c3 extend zypper integration tests (#15596)
* extend zypper integration tests

* fix caching issue of local RPMS on openSUSE 42.1 Leap
* add tests for simultaneous install/remove via prefixes +-
* test fail cases (rm patch or URL)
* test patch install (success, unchanged second run, fail on wrong name)

* add test for pattern install
2016-04-27 10:38:34 -07:00
Toshio Kuratomi 41f5ce80fa Some algorithmic unittests for the apt and docker modules 2016-04-27 10:38:13 -07:00
Toshio Kuratomi 07e0fceb61 Some beginning python3 porting for urls.py 2016-04-27 10:37:16 -07:00
Toshio Kuratomi caa8fffa01 Fix traceback in fetch_urls when status code is in info as well as the error. 2016-04-27 10:37:05 -07:00
James Cammarata 5954a82dd6 New release v2.1.0.0-0.1.rc1 2016-04-26 16:23:34 -04:00
James Cammarata 7f7e010a32 Submodule update for stable-2.1 branch 2016-04-26 16:18:29 -04:00
228 changed files with 6236 additions and 1844 deletions

.gitignore

@@ -61,3 +61,4 @@ venv
Vagrantfile
.vagrant
ansible.egg-info/
/shippable/

.travis.yml

@@ -1,40 +0,0 @@
dist: trusty
sudo: required
services:
- docker
language: python
matrix:
include:
- env: TARGET=centos6
- env: TARGET=centos7 TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro"
- env: TARGET=fedora23 TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro"
- env: TARGET=fedora-rawhide TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro"
- env: TARGET=ubuntu1204
- env: TARGET=ubuntu1404
- env: TARGET=sanity TOXENV=py26
python: 2.6
- env: TARGET=sanity TOXENV=py27
python: 2.7
- env: TARGET=sanity TOXENV=py34
python: 3.4
- env: TARGET=sanity TOXENV=py35
python: 3.5
- env: TARGET=sanity TOXENV=py24
addons:
apt:
sources:
- deadsnakes
packages:
- python2.4
install:
- pip install tox
script:
- ./test/utils/run_tests.sh
notifications:
irc:
channels:
- "chat.freenode.net#ansible-notices"
on_success: change
on_failure: always
skip_join: true
nick: ansibletravis

CHANGELOG.md

@@ -1,7 +1,134 @@
Ansible Changes By Release
==========================
## 2.1 "The Song Remains the Same" - ACTIVE DEVELOPMENT
## 2.1.6 "The Song Remains the Same" - 06-01-2017
* Security fix for CVE-2017-7481 - data for lookup plugins used as variables was not being correctly marked as "unsafe".
## 2.1.5 "The Song Remains the Same" - 03-27-2017
* Continued security fix for CVE-2016-9587 - Handle some additional corner cases in the way conditionals are parsed and evaluated.
## 2.1.4 "The Song Remains the Same" - 01-16-2017
* Security fix for CVE-2016-9587 - An attacker with control over a client system being managed by Ansible and the ability to send facts back to the Ansible server could use this flaw to execute arbitrary code on the Ansible server as the user and group Ansible is running as.
* Fixed a bug with conditionals in loops, where undefined variables and other errors will defer raising the error until the conditional has been evaluated.
* Added a version check for jinja2-2.9, which does not fully work with Ansible currently.
## 2.1.3 "The Song Remains the Same" - 11-04-2016
* Security fix for CVE-2016-8628 - Command injection by compromised server via fact variables. In some situations, facts returned by modules could overwrite connection-based facts or some other special variables, leading to injected commands running on the Ansible controller as the user running Ansible (or via escalated permissions).
* Security fix for CVE-2016-8614 - apt_key module not properly validating keys in some situations.
###Minor Changes:
* The subversion module from core now marks its password parameter as no_log so
the password is obscured when logging.
* The postgresql_lang and postgresql_ext modules from extras now mark
login_password as no_log so the password is obscured when logging.
* Fixed several bugs related to locating files relative to role/playbook directories.
* Fixed a bug in the way hosts were tested for failed states, resulting in incorrectly skipped block sections.
* Fixed a bug in the way our custom JSON encoder is used for the to_json* filters.
* Fixed some bugs related to the use of non-ascii characters in become passwords.
* Fixed a bug with Azure modules which may be using the latest rc6 library.
* Backported some docker_common fixes.
## 2.1.2 "The Song Remains the Same" - 09-29-2016
###Minor Changes:
* Fixed a bug related to creation of retry files (#17456)
* Fixed a bug in the way include params are used when an include task is dynamic (#17064)
* Fixed a bug related to including blocks in an include task (#15963)
* Fixed a bug related to the use of hostvars internally when creating the connection plugin. This prevents things like variables using lookups from being evaluated unnecessarily (#17024)
* Fixed a bug where using a variable containing a list for the `hosts` of a play resulted in a list of lists (#16583)
* Fixed a bug where integer values would cause an error if a module param was of type `float` (no issue)
* Fixed a bug with net_template failing if src was not specified (#17726)
* Fixed a bug in "ansible-galaxy import" (#17417)
* Fixed a bug in which INI files incorrectly treated a hosts range as a section header (#15331)
* Fixed a bug in which the max_fail_percentage calculation erroneously caused a series of plays to stop executing (#15954)
* Fixed a bug in which the task names were not properly templated (#16295)
* Fixed a bug causing "squashed" loops (ie. yum, apt) to incorrectly report results (ansible-modules-core#4214)
* Fixed several bugs related to includes:
- when including statically, make sure that all parents were also included statically (issue #16990)
- properly resolve nested static include paths
- print a message when a file is statically included
* Fixed a bug in which module params expected to be float types were not converted from integers (only strings) (#17325)
* Fixed a bug introduced by static includes in 2.1, which prevented notifications from going to the "top level" handler name.
* Fixed a bug where a group_vars or host_vars directory in the current working directory would be used (and would take precedence) over those in the inventory and/or playbook directory.
* Fixed a bug which could occur when the result of an async task did not parse as valid JSON.
* (re)-allowed the use of ansible_python_interpreter lines with more than one argument.
* Fixed several bugs related to the creation of the implicit localhost in inventory.
* Fixed a bug related to an unspecified number of retries when using until.
* Fixed a race-condition bug when creating temp directories before the worker process is forked.
* Fix a bug with async's poll keyword not making use of ansible_python_interpreter to run (and thus breaking when /usr/bin/python is not present on the remote machine.)
* Fix a bug where hosts that started with a range in inventory were being treated as an invalid section header.
Module fixes:
* Fixed a bug where the temporary CA files created by the module helper code were not being deleted properly in some situations (#17073)
* Fixed many bugs in the unarchive module
* Fixes for module ec2:
- Fixed a bug related to source_dest_check when used with non-vpc instances (core#3243)
- Fixed a bug in ec2 where instances were not powering off when referenced via tags only (core#4765)
- Fixed a bug where instances with multiple interfaces were not powering up/down correctly (core#3234)
* Fixes for module get_url:
- Fixed a bug in get_url module to force a download if there is a checksum mismatch regardless of the last modified time (core#4262)
- Fixed a bug in get_url module to properly process FTP results (core#3661 and core#4601)
* Fixed a bug in win_user related to users with disabled accounts/expired passwords (core#4369)
* ini_file:
- Fixed a bug where option lines are now inserted before blank lines.
- Fixed a bug where leading whitespace prevented matches on options.
* Fixed a bug in iam_cert when dup_ok is used as a string.
* Fixed a bug in postgresql_db related to the changed logic when state=absent.
* Fixed a bug where single_transaction and quick were not passed into db_dump for the mysql_db module.
* Fixed a bug where the fetch module was not idempotent when retrieving the target of a symlink.
* Many minor fixes for bugs in extras modules.
###Deprecations:
* Deprecated the use of `_fixup_perms`. Use `_fixup_perms2` instead.
This change only impacts custom action plugins using `_fixup_perms`.
###Incompatible Changes:
* Use of `_fixup_perms` with `recursive=True` (the default) is no longer supported.
Custom action plugins using `_fixup_perms` will require changes unless they already use `recursive=False`.
Use `_fixup_perms2` if support for previous releases is not required.
Otherwise use `_fixup_perms` with `recursive=False`.
## 2.1.1 "The Song Remains the Same" - 07-28-2016
###Minor Changes:
* If the user is not using paramiko or vault, allow Ansible to run if pycrypto is not installed.
* Fixed a bug in pkg_util module that caused "update_catalog must be one of" error if 'update_catalog' arg was used.
* Fixed a bug where pseudo-connection vars (eg, ansible_winrm_transport) defined in group_vars or host_vars were not getting passed to the connection.
* Fixed a bug where temp file permissions on BSDs were not using filesystem acls when available.
* Fixed some bugs in variable dependency resolution. These were mainly related to includes and roles, to bring the VariableManager code in line with our documentation.
* Fixed a bug in unarchive, when the destination was a symlinked directory.
* Fixed a bug related to performance when loading a large number of groups.
* Fixed bugs related to the way host and group vars are loaded, which (for large sets of inventory variables) can reduce CPU and memory usage by 50%.
* Fixed a bug where includes were not being implicitly evaluated as static when no loop or variables were being used.
* Fixed several more bugs in relation to the way play execution continues or halts when hosts fail, to bring the behavior more in line with 1.9.x.
* Fixed bugs related to the use of the underlying shell executable with the script and raw modules.
* Fixed several bugs in relation to the way ssh keys were used with various networking modules.
* Fixed a bug related to the way handlers are tracked internally, which could cause bugs when roles are reused within the same play (allow_duplicates: yes) or when the role dependencies formed a "diamond" pattern.
* Fixed a bug related to setfacl on platforms which do not support the -R option for recursive changes.
* Several fixes to the way async works to prevent race conditions and other bugs
* More fixes to the way failed and unreachable hosts affect future plays
* Fixed a bug in the way the to_json filter encoded some objects
* Fixed a bug in the way roles and dependencies are loaded, and how they inherit params from parent roles.
* Fixed a bug in which the number of retries in a do/until loop was off by one
* Fixed a bug in the way the passwd lookup deals with salts
* When using the local connection, internally the remote_user value is now forced to be the current user even if remote_user is specified, to prevent issues with become settings
* Fix for Azure modules to work with most recent Azure python library (2.0.0rc5)
* Fix for bug related to unreachable hosts and any_errors_fatal in the linear strategy
* Fix for error handling in relation to killed/dead worker processes. If workers are killed via SIGKILL or SIGTERM, this will halt execution of the playbook.
* Fixed a regression in the way we handle variables from dependent roles.
* Added better handling for certain errors thrown from the cryptography library.
* Fixed a typo in the azure_rm_storageaccount module.
* Fixed some minor bugs in the os_user_role and cs_volume modules.
* Fixed a bug related to the return value of a low-level inventory API call related to getting variables for hosts and groups.
## 2.1 "The Song Remains the Same" - 05-25-2016
###Major Changes:
@@ -38,13 +165,21 @@ Ansible Changes By Release
- azure:
* azure_rm_deployment
* azure_rm_networkinterface
* azure_rm_networkinterface_facts (TECH PREVIEW)
* azure_rm_publicipaddress
* azure_rm_publicipaddress_facts (TECH PREVIEW)
* azure_rm_resourcegroup
* azure_rm_resourcegroup_facts (TECH PREVIEW)
* azure_rm_securitygroup
* azure_rm_securitygroup_facts (TECH PREVIEW)
* azure_rm_storageaccount
* azure_rm_storageaccount_facts (TECH PREVIEW)
* azure_rm_storageblob
* azure_rm_subnet
* azure_rm_virtualmachine
* azure_rm_virtualmachineimage_facts (TECH PREVIEW)
* azure_rm_virtualnetwork
* azure_rm_virtualnetwork_facts (TECH PREVIEW)
- cloudflare_dns
- cloudstack
* cs_cluster
@@ -70,6 +205,7 @@ Ansible Changes By Release
* eos_config
* eos_eapi
* eos_template
- git_config
- gitlab
* gitlab_group
* gitlab_project
@@ -160,7 +296,7 @@ Ansible Changes By Release
###Minor Changes:
* Added support for pipelining mode to more connection plugins, which helps prevent
module data from being written to disk.
* Added a new '!unsafe' YAML decorator, which can be used in playbooks to ensure a
string is not templated. For example: `foo: !unsafe "Don't template {{me}}"`.
@@ -172,12 +308,16 @@ Ansible Changes By Release
two custom callback plugins to run in a certain order you can name them
10-first-callback.py and 20-second-callback.py.
* Added (alpha) Centrify's dzdo as another become method (privilege escalation)
* Fixes for unarchive when filenames contain non-ascii characters
* Fixes for s3_bucket when setting an s3_url.
* Fix for connections which return extra data after the module's done sending its information.
###Deprecations:
* Deprecated the use of "bare" variables in loops (ie. `with_items: foo`, where `foo` is a variable).
The full jinja2 variable syntax of `{{foo}}` should always be used instead. This warning will be removed
completely in 2.3, after which time it will be an error.
* Deprecated accelerated mode.
## 2.0.2 "Over the Hills and Far Away"

Makefile

@@ -67,7 +67,7 @@ ifeq ($(OFFICIAL),yes)
DEBUILD_OPTS += -k$(DEBSIGN_KEYID)
endif
else
DEB_RELEASE = 0.git$(DATE)$(GITINFO)
DEB_RELEASE = 100.git$(DATE)$(GITINFO)
# Do not sign unofficial builds
DEBUILD_OPTS += -uc -us
DPUT_OPTS += -u
@@ -83,7 +83,7 @@ RPMSPEC = $(RPMSPECDIR)/ansible.spec
RPMDIST = $(shell rpm --eval '%{?dist}')
RPMRELEASE = $(RELEASE)
ifneq ($(OFFICIAL),yes)
RPMRELEASE = 0.git$(DATE)$(GITINFO)
RPMRELEASE = 100.git$(DATE)$(GITINFO)
endif
RPMNVR = "$(NAME)-$(VERSION)-$(RPMRELEASE)$(RPMDIST)"

README.md

@@ -1,7 +1,6 @@
[![PyPI version](https://img.shields.io/pypi/v/ansible.svg)](https://pypi.python.org/pypi/ansible)
[![PyPI downloads](https://img.shields.io/pypi/dm/ansible.svg)](https://pypi.python.org/pypi/ansible)
[![Build Status](https://travis-ci.org/ansible/ansible.svg?branch=devel)](https://travis-ci.org/ansible/ansible)
[![Build Status](https://api.shippable.com/projects/573f79d02a8192902e20e34b/badge?branch=stable-2.1)](https://app.shippable.com/projects/573f79d02a8192902e20e34b)
Ansible
=======

RELEASES.txt

@@ -1,73 +1,83 @@
Ansible Releases at a Glance
============================
Active Development
++++++++++++++++++
2.1 "The Song Remains the Same" - in progress
Released
++++++++
2.0.2 "Over the Hills and Far Away" 04-19-2015
2.0.1 "Over the Hills and Far Away" 02-24-2016
2.0.0 "Over the Hills and Far Away" 01-12-2016
1.9.6 "Dancing In the Streets" 04-15-2016
1.9.5 "Dancing In the Streets" 03-21-2016
1.9.4 "Dancing In the Streets" 10-09-2015
1.9.3 "Dancing In the Streets" 09-03-2015
1.9.2 "Dancing In the Streets" 06-24-2015
1.9.1 "Dancing In the Streets" 04-27-2015
1.9.0 "Dancing In the Streets" 03-25-2015
1.8.4 "You Really Got Me" ---- 02-19-2015
1.8.3 "You Really Got Me" ---- 02-17-2015
1.8.2 "You Really Got Me" ---- 12-04-2014
1.8.1 "You Really Got Me" ---- 11-26-2014
1.7.2 "Summer Nights" -------- 09-24-2014
1.7.1 "Summer Nights" -------- 08-14-2014
1.7 "Summer Nights" -------- 08-06-2014
1.6.10 "The Cradle Will Rock" - 07-25-2014
1.6.9 "The Cradle Will Rock" - 07-24-2014
1.6.8 "The Cradle Will Rock" - 07-22-2014
1.6.7 "The Cradle Will Rock" - 07-21-2014
1.6.6 "The Cradle Will Rock" - 07-01-2014
1.6.5 "The Cradle Will Rock" - 06-25-2014
1.6.4 "The Cradle Will Rock" - 06-25-2014
1.6.3 "The Cradle Will Rock" - 06-09-2014
1.6.2 "The Cradle Will Rock" - 05-23-2014
1.6.1 "The Cradle Will Rock" - 05-07-2014
1.6 "The Cradle Will Rock" - 05-05-2014
1.5.5 "Love Walks In" -------- 04-18-2014
1.5.4 "Love Walks In" -------- 04-01-2014
1.5.3 "Love Walks In" -------- 03-13-2014
1.5.2 "Love Walks In" -------- 03-11-2014
1.5.1 "Love Walks In" -------- 03-10-2014
1.5 "Love Walks In" -------- 02-28-2014
1.4.5 "Could This Be Magic?" - 02-12-2014
1.4.4 "Could This Be Magic?" - 01-06-2014
1.4.3 "Could This Be Magic?" - 12-20-2013
1.4.2 "Could This Be Magic?" - 12-18-2013
1.4.1 "Could This Be Magic?" - 11-27-2013
1.4 "Could This Be Magic?" - 11-21-2013
1.3.4 "Top of the World" ----- 10-29-2013
1.3.3 "Top of the World" ----- 10-09-2013
1.3.2 "Top of the World" ----- 09-19-2013
1.3.1 "Top of the World" ----- 09-16-2013
1.3 "Top of the World" ----- 09-13-2013
1.2.3 "Hear About It Later" -- 08-21-2013
1.2.2 "Hear About It Later" -- 07-05-2013
1.2.1 "Hear About It Later" -- 07-04-2013
1.2 "Right Now" ------------ 06-10-2013
1.1 "Mean Street" ---------- 04-02-2013
1.0 "Eruption" ------------- 02-01-2013
0.9 "Dreams" --------------- 11-30-2012
0.8 "Cathedral" ------------ 10-19-2012
0.7 "Panama" --------------- 09-06-2012
0.6 "Cabo" ----------------- 08-06-2012
0.5 "Amsterdam" ------------ 07-04-2012
0.4 "Unchained" ------------ 05-23-2012
0.3 "Baluchitherium" ------- 04-23-2012
0.0.2 Untitled
0.0.1 Untitled
VERSION RELEASE CODE NAME
++++++++++++++++++++++++++++++
2.4.0 TBD "Dancing Days"
2.3.1 06-01-2017 "Ramble On"
2.3.0 04-12-2017 "Ramble On"
2.2.3 05-09-2017 "The Battle of Evermore"
2.2.2 03-27-2017 "The Battle of Evermore"
2.2.1 01-16-2017 "The Battle of Evermore"
2.2.0 11-01-2016 "The Battle of Evermore"
2.1.6 06-01-2017 "The Song Remains the Same"
2.1.5 03-27-2017 "The Song Remains the Same"
2.1.4 01-16-2017 "The Song Remains the Same"
2.1.3 11-04-2016 "The Song Remains the Same"
2.1.2 09-29-2016 "The Song Remains the Same"
2.1.1 07-28-2016 "The Song Remains the Same"
2.1.0 05-25-2016 "The Song Remains the Same"
2.0.2 04-19-2016 "Over the Hills and Far Away"
2.0.1 02-24-2016 "Over the Hills and Far Away"
2.0.0 01-12-2016 "Over the Hills and Far Away"
1.9.6 04-15-2016 "Dancing In the Streets"
1.9.5 03-21-2016 "Dancing In the Streets"
1.9.4 10-09-2015 "Dancing In the Streets"
1.9.3 09-03-2015 "Dancing In the Streets"
1.9.2 06-24-2015 "Dancing In the Streets"
1.9.1 04-27-2015 "Dancing In the Streets"
1.9.0 03-25-2015 "Dancing In the Streets"
1.8.4 02-19-2015 "You Really Got Me"
1.8.3 02-17-2015 "You Really Got Me"
1.8.2 12-04-2014 "You Really Got Me"
1.8.1 11-26-2014 "You Really Got Me"
1.8.0 11-25-2014 "You Really Got Me"
1.7.2 09-24-2014 "Summer Nights"
1.7.1 08-14-2014 "Summer Nights"
1.7.0 08-06-2014 "Summer Nights"
1.6.10 07-25-2014 "The Cradle Will Rock"
1.6.9 07-24-2014 "The Cradle Will Rock"
1.6.8 07-22-2014 "The Cradle Will Rock"
1.6.7 07-21-2014 "The Cradle Will Rock"
1.6.6 07-01-2014 "The Cradle Will Rock"
1.6.5 06-25-2014 "The Cradle Will Rock"
1.6.4 06-25-2014 "The Cradle Will Rock"
1.6.3 06-09-2014 "The Cradle Will Rock"
1.6.2 05-23-2014 "The Cradle Will Rock"
1.6.1 05-07-2014 "The Cradle Will Rock"
1.6.0 05-05-2014 "The Cradle Will Rock"
1.5.5 04-18-2014 "Love Walks In"
1.5.4 04-01-2014 "Love Walks In"
1.5.3 03-13-2014 "Love Walks In"
1.5.2 03-11-2014 "Love Walks In"
1.5.1 03-10-2014 "Love Walks In"
1.5.0 02-28-2014 "Love Walks In"
1.4.5 02-12-2014 "Could This Be Magic?"
1.4.4 01-06-2014 "Could This Be Magic?"
1.4.3 12-20-2013 "Could This Be Magic?"
1.4.2 12-18-2013 "Could This Be Magic?"
1.4.1 11-27-2013 "Could This Be Magic?"
1.4.0 11-21-2013 "Could This Be Magic?"
1.3.4 10-29-2013 "Top of the World"
1.3.3 10-09-2013 "Top of the World"
1.3.2 09-19-2013 "Top of the World"
1.3.1 09-16-2013 "Top of the World"
1.3.0 09-13-2013 "Top of the World"
1.2.3 08-21-2013 "Right Now"
1.2.2 07-05-2013 "Right Now"
1.2.1 07-04-2013 "Right Now"
1.2.0 06-10-2013 "Right Now"
1.1.0 04-02-2013 "Mean Street"
1.0.0 02-01-2013 "Eruption"
0.9.0 11-30-2012 "Dreams"
0.8.0 10-19-2012 "Cathedral"
0.7.0 09-06-2012 "Panama"
0.6.0 08-06-2012 "Cabo"
0.5.0 07-04-2012 "Amsterdam"
0.4.0 05-23-2012 "Unchained"
0.3.0 04-23-2012 "Baluchitherium"
0.2.0 ? "Untitled"
0.1.0 ? "Untitled"
0.0.2 ? "Untitled"
0.0.1 ? "Untitled"

VERSION

@@ -1 +1 @@
2.1.0
2.1.6.0 1

contrib/inventory/azure_rm.ini

@@ -9,6 +9,9 @@
# Control which tags are included. Set tags to a comma separated list of keys or key:value pairs
#tags=
# Control which locations are included. Set locations to a comma separated list (e.g. eastus,eastus2,westus)
#locations=
# Include powerstate. If you don't need powerstate information, turning it off improves runtime performance.
include_powerstate=yes

contrib/inventory/azure_rm.py

@@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
@@ -23,7 +23,7 @@
Azure External Inventory Script
===============================
Generates dynamic inventory by making API requests to the Azure Resource
Manager using the AAzure Python SDK. For instruction on installing the
Manager using the Azure Python SDK. For instruction on installing the
Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/
Authentication
@@ -32,7 +32,7 @@ The order of precedence is command line arguments, environment variables,
and finally the [default] profile found in ~/.azure/credentials.
If using a credentials file, it should be an ini formatted file with one or
more sections, which we refer to as profiles. The script looks for a
[default] section, if a profile is not specified either on the command line
or with an environment variable. The keys in a profile will match the
list of command line arguments below.
@@ -42,7 +42,7 @@ in your ~/.azure/credentials file, or a service principal or Active Directory
user.
Command line arguments:
- profile
- client_id
- secret
- subscription_id
@@ -61,7 +61,7 @@ Environment variables:
Run for Specific Host
-----------------------
When run for a specific host using the --host option, a resource group is
required. For a specific host, this script returns the following variables:
{
@ -76,7 +76,7 @@ required. For a specific host, this script returns the following variables:
"version": "latest"
},
"location": "westus",
"mac_address": "00-0D-3A-31-2C-EC",
"mac_address": "00-00-5E-00-53-FE",
"name": "object-name",
"network_interface": "interface-name",
"network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
@ -130,6 +130,10 @@ Select hosts for specific tag key by assigning a comma separated list of tag key
AZURE_TAGS=key1,key2,key3
Select hosts for specific locations:
AZURE_LOCATIONS=eastus,westus,eastus2
Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
AZURE_TAGS=key1:value1,key2:value2
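A toy version of that key and key:value matching, for illustration only (the helper name and logic are hypothetical; the script implements its own matching):

def tags_match(vm_tags, wanted):
    # wanted is e.g. ['key1:value1', 'key2']; a bare key matches any value
    for item in wanted:
        key, _, value = item.partition(':')
        if key not in vm_tags or (value and vm_tags[key] != value):
            return False
    return True

print(tags_match({'env': 'qa', 'team': 'galaxy'}, ['env:qa', 'team']))  # True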
@ -138,11 +142,13 @@ If you don't need the powerstate, you can improve performance by turning off pow
AZURE_INCLUDE_POWERSTATE=no
azure_rm.ini
----------------------
As mentioned above you can control execution using environment variables or an .ini file. A sample
------------
As mentioned above, you can control execution using environment variables or a .ini file. A sample
azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
'azure_rm') with a .ini extension. This provides you with the flexibility of copying and customizing this
script and having matching .ini files. Go forth and customize your Azure inventory!
'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
a different path for the .ini file, define the AZURE_INI_PATH environment variable:
export AZURE_INI_PATH=/path/to/custom.ini
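The lookup order this implies, sketched in plain Python (the script path below is a placeholder):

import os
script = "/path/to/contrib/inventory/azure_rm.py"            # hypothetical location
basename = os.path.splitext(os.path.basename(script))[0]     # 'azure_rm'
default_path = os.path.join(os.path.dirname(script), basename + ".ini")
ini_path = os.path.expanduser(os.path.expandvars(
    os.environ.get("AZURE_INI_PATH", default_path)))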
Powerstate:
-----------
@ -152,13 +158,13 @@ up. If the value is anything other than 'running', the machine is down, and will
Examples:
---------
Execute /bin/uname on all instances in the galaxy-qa resource group
$ ansible -i azure_rm_inventory.py galaxy-qa -m shell -a "/bin/uname -a"
$ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a"
Use the inventory script to print instance specific information
$ contrib/inventory/azure_rm_inventory.py --host my_instance_host_name --pretty
$ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty
Use with a playbook
$ ansible-playbook -i contrib/inventory/azure_rm_inventory.py my_playbook.yml --limit galaxy-qa
$ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa
Insecure Platform Warning
@ -180,11 +186,13 @@ Version: 1.0.0
import argparse
import ConfigParser
import json
import os
import re
import sys
from packaging.version import Version
from os.path import expanduser
HAS_AZURE = True
@ -195,12 +203,9 @@ try:
from azure.mgmt.compute import __version__ as azure_compute_version
from azure.common import AzureMissingResourceHttpError, AzureHttpError
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network.network_management_client import NetworkManagementClient,\
NetworkManagementClientConfiguration
from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient,\
ResourceManagementClientConfiguration
from azure.mgmt.compute.compute_management_client import ComputeManagementClient,\
ComputeManagementClientConfiguration
from azure.mgmt.network.network_management_client import NetworkManagementClient
from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient
from azure.mgmt.compute.compute_management_client import ComputeManagementClient
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
@ -219,6 +224,7 @@ AZURE_CREDENTIAL_ENV_MAPPING = dict(
AZURE_CONFIG_SETTINGS = dict(
resource_groups='AZURE_RESOURCE_GROUPS',
tags='AZURE_TAGS',
locations='AZURE_LOCATIONS',
include_powerstate='AZURE_INCLUDE_POWERSTATE',
group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
group_by_location='AZURE_GROUP_BY_LOCATION',
@ -226,7 +232,7 @@ AZURE_CONFIG_SETTINGS = dict(
group_by_tag='AZURE_GROUP_BY_TAG'
)
AZURE_MIN_VERSION = "2016-03-30"
AZURE_MIN_VERSION = "0.30.0rc5"
def azure_id_to_dict(id):
@ -303,7 +309,7 @@ class AzureRM(object):
def _get_env_credentials(self):
env_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
env_credentials[attribute] = os.environ.get(env_variable, None)
if env_credentials['profile'] is not None:
@ -322,7 +328,7 @@ class AzureRM(object):
self.log('Getting credentials')
arg_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
arg_credentials[attribute] = getattr(params, attribute)
# try module params
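The .iteritems() to .items() swaps in this file are the Python 3 compatibility half of the diff; a minimal illustration:

# dict.iteritems() no longer exists on Python 3; .items() works on both
# (on Python 2 it builds a list, which is fine for these small mappings).
mapping = dict(profile='AZURE_PROFILE', subscription_id='AZURE_SUBSCRIPTION_ID')
for attribute, env_variable in mapping.items():
    print(attribute, env_variable)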
@ -356,14 +362,17 @@ class AzureRM(object):
resource_client = self.rm_client
resource_client.providers.register(key)
except Exception as exc:
self.fail("One-time registration of {0} failed - {1}".format(key, str(exc)))
self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
self.log("You might need to register {0} using an admin account".format(key))
self.log(("To register a provider using the Python CLI: "
"https://docs.microsoft.com/azure/azure-resource-manager/"
"resource-manager-common-deployment-errors#noregisteredproviderfound"))
@property
def network_client(self):
self.log('Getting network client')
if not self._network_client:
self._network_client = NetworkManagementClient(
NetworkManagementClientConfiguration(self.azure_credentials, self.subscription_id))
self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id)
self._register('Microsoft.Network')
return self._network_client
@ -371,16 +380,14 @@ class AzureRM(object):
def rm_client(self):
self.log('Getting resource manager client')
if not self._resource_client:
self._resource_client = ResourceManagementClient(
ResourceManagementClientConfiguration(self.azure_credentials, self.subscription_id))
self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id)
return self._resource_client
@property
def compute_client(self):
self.log('Getting compute client')
if not self._compute_client:
self._compute_client = ComputeManagementClient(
ComputeManagementClientConfiguration(self.azure_credentials, self.subscription_id))
self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id)
self._register('Microsoft.Compute')
return self._compute_client
@ -403,6 +410,7 @@ class AzureInventory(object):
self.resource_groups = []
self.tags = None
self.locations = None
self.replace_dash_in_groups = False
self.group_by_resource_group = True
self.group_by_location = True
@ -425,6 +433,9 @@ class AzureInventory(object):
if self._args.tags:
self.tags = self._args.tags.split(',')
if self._args.locations:
self.locations = self._args.locations.split(',')
if self._args.no_powerstate:
self.include_powerstate = False
@ -435,7 +446,7 @@ class AzureInventory(object):
def _parse_cli_args(self):
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file for an Azure subscription')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--debug', action='store_true', default=False,
@ -462,6 +473,8 @@ class AzureInventory(object):
help='Return inventory for comma separated list of resource group names')
parser.add_argument('--tags', action='store',
help='Return inventory for comma separated list of tag key:value pairs')
parser.add_argument('--locations', action='store',
help='Return inventory for comma separated list of locations')
parser.add_argument('--no-powerstate', action='store_true', default=False,
help='Do not include the power state of each virtual host')
return parser.parse_args()
@ -487,7 +500,7 @@ class AzureInventory(object):
except Exception as exc:
sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))
if self._args.host or self.tags > 0:
if self._args.host or self.tags or self.locations:
selected_machines = self._selected_machines(virtual_machines)
self._load_machines(selected_machines)
else:
@ -524,7 +537,7 @@ class AzureInventory(object):
resource_group=resource_group,
mac_address=None,
plan=(machine.plan.name if machine.plan else None),
virtual_machine_size=machine.hardware_profile.vm_size.value,
virtual_machine_size=machine.hardware_profile.vm_size,
computer_name=machine.os_profile.computer_name,
provisioning_state=machine.provisioning_state,
)
@ -576,7 +589,7 @@ class AzureInventory(object):
host_vars['mac_address'] = network_interface.mac_address
for ip_config in network_interface.ip_configurations:
host_vars['private_ip'] = ip_config.private_ip_address
host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method.value
host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method
if ip_config.public_ip_address:
public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
public_ip_address = self._network_client.public_ip_addresses.get(
@ -585,7 +598,7 @@ class AzureInventory(object):
host_vars['ansible_host'] = public_ip_address.ip_address
host_vars['public_ip'] = public_ip_address.ip_address
host_vars['public_ip_name'] = public_ip_address.name
host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method.value
host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method
host_vars['public_ip_id'] = public_ip_address.id
if public_ip_address.dns_settings:
host_vars['fqdn'] = public_ip_address.dns_settings.fqdn
@ -599,6 +612,8 @@ class AzureInventory(object):
selected_machines.append(machine)
if self.tags and self._tags_match(machine.tags, self.tags):
selected_machines.append(machine)
if self.locations and machine.location in self.locations:
selected_machines.append(machine)
return selected_machines
def _get_security_groups(self, resource_group):
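The new locations branch above reduces to a membership test; a stubbed illustration:

locations = ['eastus', 'westus']        # from --locations, the ini file, or AZURE_LOCATIONS
machines = [dict(name='vm1', location='eastus'),
            dict(name='vm2', location='centralus')]
selected = [m for m in machines if m['location'] in locations]
print([m['name'] for m in selected])    # ['vm1']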
@ -653,7 +668,7 @@ class AzureInventory(object):
self._inventory['azure'].append(host_name)
if self.group_by_tag and vars.get('tags'):
for key, value in vars['tags'].iteritems():
for key, value in vars['tags'].items():
safe_key = self._to_safe(key)
safe_value = safe_key + '_' + self._to_safe(value)
if not self._inventory.get(safe_key):
@ -676,17 +691,17 @@ class AzureInventory(object):
file_settings = self._load_settings()
if file_settings:
for key in AZURE_CONFIG_SETTINGS:
if key in ('resource_groups', 'tags') and file_settings.get(key, None) is not None:
if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key):
values = file_settings.get(key).split(',')
if len(values) > 0:
setattr(self, key, values)
elif file_settings.get(key, None) is not None:
elif file_settings.get(key):
val = self._to_boolean(file_settings[key])
setattr(self, key, val)
else:
env_settings = self._get_env_settings()
for key in AZURE_CONFIG_SETTINGS:
if key in('resource_groups', 'tags') and env_settings.get(key, None) is not None:
if key in('resource_groups', 'tags', 'locations') and env_settings.get(key):
values = env_settings.get(key).split(',')
if len(values) > 0:
setattr(self, key, values)
@ -713,13 +728,14 @@ class AzureInventory(object):
def _get_env_settings(self):
env_settings = dict()
for attribute, env_variable in AZURE_CONFIG_SETTINGS.iteritems():
for attribute, env_variable in AZURE_CONFIG_SETTINGS.items():
env_settings[attribute] = os.environ.get(env_variable, None)
return env_settings
def _load_settings(self):
basename = os.path.splitext(os.path.basename(__file__))[0]
path = basename + '.ini'
default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path)))
config = None
settings = None
try:
@ -774,11 +790,11 @@ class AzureInventory(object):
def main():
if not HAS_AZURE:
sys.exit("The Azure python sdk is not installed (try 'pip install azure') - {0}".format(HAS_AZURE_EXC))
sys.exit("The Azure python sdk is not installed (try `pip install 'azure>=2.0.0rc5' --upgrade`) - {0}".format(HAS_AZURE_EXC))
if azure_compute_version < AZURE_MIN_VERSION:
sys.exit("Expecting azure.mgmt.compute.__version__ to be >= {0}. Found version {1} "
"Do you have Azure >= 2.0.0rc2 installed?".format(AZURE_MIN_VERSION, azure_compute_version))
if Version(azure_compute_version) < Version(AZURE_MIN_VERSION):
sys.exit("Expecting azure.mgmt.compute.__version__ to be {0}. Found version {1} "
"Do you have Azure >= 2.0.0rc5 installed? (try `pip install 'azure>=2.0.0rc5' --upgrade`)".format(AZURE_MIN_VERSION, azure_compute_version))
AzureInventory()
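The move to packaging.version.Version is what makes prerelease tags compare correctly; for example (requires the packaging library):

from packaging.version import Version

assert Version("0.30.0rc6") < Version("0.30.0")    # prereleases sort before the final release
assert Version("0.30.0rc6") > Version("0.30.0rc5")
assert "0.30.0rc6" > "0.30.0"                      # a plain string comparison gets this wrong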

View file

@ -3,6 +3,8 @@
[collins]
# You should not have a trailing slash or collins
# will not properly match the URI
host = http://localhost:9000
username = blake

View file

@ -201,7 +201,8 @@ class CollinsInventory(object):
response = open_url(query_url,
timeout=self.collins_timeout_secs,
url_username=self.collins_username,
url_password=self.collins_password)
url_password=self.collins_password,
force_basic_auth=True)
json_response = json.loads(response.read())
# Adds any assets found to the array of assets.
assets += json_response['data']['Data']
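force_basic_auth makes open_url send the Authorization header on the first request rather than waiting for an HTTP 401 challenge, which some APIs never issue. A usage sketch with placeholder endpoint and credentials:

from ansible.module_utils.urls import open_url

response = open_url("http://localhost:9000/api/assets",   # hypothetical Collins endpoint
                    url_username="blake",
                    url_password="secret",
                    force_basic_auth=True,
                    timeout=120)
print(response.read())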

View file

@ -24,9 +24,16 @@ DOCUMENTATION = '''
Docker Inventory Script
=======================
Generates dynamic inventory by making API requests to one or more Docker daemons. Communicates with the API
by way of docker-py (https://docker-py.readthedocs.org/en/stable/). So before running the script, you will need to
install docker-py:
The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic
because the inventory is generated at run-time rather than being read from a static file. The script generates the
inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the
script contacts can be defined using environment variables or a configuration file.
Requirements
------------
Using the docker modules requires having docker-py <https://docker-py.readthedocs.org/en/stable/>
installed on the host running Ansible. To install docker-py:
pip install docker-py
@ -197,126 +204,123 @@ When run in --list mode (the default), container instances are grouped by:
Configuration:
--------------
You can control the behavior of the inventory script by passing arguments, defining environment variables, or
creating a docker.yml file (sample provided in ansible/contrib/inventory). The order of precedence is command
line args, then the docker.yml file and finally environment variables.
creating a configuration file named docker.yml (sample provided in ansible/contrib/inventory). The order of precedence
is command line args, then the docker.yml file and finally environment variables.
Environment variables:
;;;;;;;;;;;;;;;;;;;;;;
......................
DOCKER_CONFIG_FILE
description: path to docker inventory configuration file.
default: ./docker.yml
To connect to a single Docker API the following variables can be defined in the environment to control the connection
options. These are the same environment variables used by the Docker modules.
DOCKER_HOST
description: Docker daemon URL or Unix socket path.
default: unix://var/run/docker.sock
DOCKER_HOST
The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock.
DOCKER_TLS_HOSTNAME:
description: When DOCKER_TLS_VERIFY is true, provide the expected name of the host.
default: localhost
DOCKER_API_VERSION:
The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
by docker-py.
DOCKER_API_VERSION:
description: Version of the Docker API the client will use.
default: DEFAULT_DOCKER_API_VERSION as defined in docker-py
DOCKER_TIMEOUT:
The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.
DOCKER_CERT_PATH:
description: Path to the directory containing the client certificate and key files.
default: None
DOCKER_TLS:
Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
Defaults to False.
DOCKER_SSL_VERSION:
description: Version of TLS supported by Docker daemon.
default: None
DOCKER_TLS_VERIFY:
Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
Default is False
DOCKER_TLS:
description: Use TLS when sending requests to Docker daemon. Set to 1, 0, true, false, True, False, yes, no.
default: False
DOCKER_TLS_HOSTNAME:
When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
to localhost.
DOCKER_TLS_VERIFY:
description: Verify hostname found in TLS certs. Set to 1, 0, true, false, True, False, yes, no.
default: False
DOCKER_CERT_PATH:
Path to the directory containing the client certificate, client key and CA certificate.
DOCKER_TIMEOUT:
description: Docker request timeout in seconds.
default: Value of DOCKER_TIMEOUT as defined in docker-py
DOCKER_SSL_VERSION:
Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing
was 1.0
DOCKER_PRIVATE_SSH_PORT:
description: The private port (container port) on which SSH is listening for connections
default: 22
In addition to the connection variables, there are a couple of variables used to control the execution and output of the
script:
DOCKER_DEFAULT_IP:
description: This environment variable overrides the container SSH connection
IP address (aka, 'ansible_ssh_host').
DOCKER_CONFIG_FILE
Path to the configuration file. Defaults to ./docker.yml.
This option allows one to override the ansible_ssh_host whenever Docker has exercised its default behavior of
binding private ports to all interfaces of the Docker host. This behavior, when dealing with remote Docker hosts,
does not allow Ansible to determine a proper host IP address on which to connect via SSH to containers. By
default, this inventory module assumes all 0.0.0.0-exposed ports to be bound to localhost:<port>. To override
this behavior, for example, to bind a container's SSH port to the public interface of its host, one must
manually set this IP.
DOCKER_PRIVATE_SSH_PORT:
The private port (container port) on which SSH is listening for connections. Defaults to 22.
It is preferable to begin to launch Docker containers with ports exposed on publicly accessible IP addresses,
particularly if the containers are to be targeted by Ansible for remote configuration, not accessible via
localhost SSH connections. Docker containers can be explicitly exposed on IP addresses by
a) starting the daemon with the --ip argument
b) running containers with the -P/--publish ip::containerPort
argument
default: 127.0.0.1 if port exposed on 0.0.0.0
DOCKER_DEFAULT_IP:
The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
docker.yml
;;;;;;;;;;;;;;;;;;;;
Configuration File
..................
A sample docker.yml file is included in the ansible/contrib/inventory. Using this file is not required. If
the file is not found, environment variables will be used.
Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory.
The default name of the file is derived from the name of the inventory script. By default the script will look for
basename of the script (i.e. docker) with an extension of '.yml'. You can override the default name by passing a
command line argument or setting DOCKER_CONFIG_FILE in the environment.
the basename of the script (i.e. docker) with an extension of '.yml'.
You can also override the default file name by defining DOCKER_CONFIG_FILE in the environment.
Here's what you can define in docker.yml:
* defaults: Defines a default connnection. Defaults will be taken from this and applied to any values not provided
for a host defined in the hosts list.
defaults
Defines a default connection. Defaults will be taken from this and applied to any values not provided
for a host defined in the hosts list.
* hosts: If you wish to get inventory from more than one Docker daemon hosts, define a hosts list.
hosts
If you wish to get inventory from more than one Docker host, define a hosts list.
For a host defined in defaults or hosts, you can provided the following attributes. The only required attribute is host.
For the default host and each host in the hosts list define the following attributes:
host:
description: The URL or Unix socket path for the host.
description: The URL or Unix socket path used to connect to the Docker API.
required: yes
tls:
description: Connect using https://
description: Connect using TLS without verifying the authenticity of the Docker host server.
default: false
required: false
tls_verify:
description: Connect using https:// and verify the host name matches the host name found in the certificate.
description: Connect using TLS and verify the authenticity of the Docker host server.
default: false
required: false
cert_path:
description: Path to the host's certificate .pem file.
description: Path to the client's TLS certificate file.
default: null
required: false
cacert_path:
description: Path to the host's Certificate Authority .pem file.
description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
default: null
required: false
key_path:
description: Path to the host's encryption key .pem file
description: Path to the client's TLS key file.
default: null
required: false
version:
description: The API version.
description: The Docker API version.
required: false
default: will be supplied by the docker-py module.
timeout:
description: The amount of time in seconds to wait on an API response.
required: false
default: will be supplied by the docker-py module.
default: 60
default_ip:
description: The IP address to assign to ansilbe_host when the container's SSH port is mappped to 0.0.0.0
description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
'0.0.0.0'.
required: false
default: 1267.0.0.1
default: 127.0.0.1
private_ssh_port:
description: The port containers use for SSH
required: false
@ -324,28 +328,25 @@ For a host defined in defaults or hosts, you can provided the following attribut
Examples
--------
# Run the script with Env vars (for when you have Docker toolbox installed)
./docker_inventory.py --pretty
# Connect to docker instance on localhost port 4243
DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty
# Connect to the Docker API on localhost port 4243 and format the JSON output
DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty
# Any container's ssh port exposed on 0.0.0.0 will mapped to
#another IP address (where Ansible will attempt to connect via SSH)
DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty
# Any container's ssh port exposed on 0.0.0.0 will be mapped to
# another IP address (where Ansible will attempt to connect via SSH)
DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty
# Run as input to a playbook:
ansible-playbook -i ~/projects/ansible/contrib/inventory/docker_inventory.py docker_inventory_test.yml
# Run as input to a playbook:
ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml
# Simple playbook to invoke with the above example:
- name: Test docker_inventory
hosts: all
connection: local
gather_facts: no
tasks:
- debug: msg="Container - {{ inventory_hostname }}"
'''

View file

@ -1,52 +1,65 @@
# This is the configuration file for the Docker inventory script: docker_inventory.py.
#
# defaults: Defines a default connnection. Defaults will be taken from this and applied to any values not provided
# for a host defined in the hosts list.
# You can define the following in this file:
#
# hosts: If you wish to get inventory from more than one Docker daemon hosts, define a hosts list.
#
# For a host defined in defaults or hosts, you can provided the following attributes. The only required attribute is host.
# defaults
# Defines a default connection. Defaults will be taken from this and applied to any values not provided
# for a host defined in the hosts list.
#
# host:
# description: The URL or Unix socket path for the host.
# required: yes
# tls:
# description: Connect using https://
# default: false
# required: false
# tls_verify:
# description: Connect using https:// and verify the host name matches the host name found in the certificate.
# default: false
# required: false
# cert_path:
# description: Path to the client's certificate .pem file.
# default: null
# required: false
# cacert_path:
# description: Path to the client's Certificate Authority .pem file.
# default: null
# required: false
# key_path:
# description: Path to the client's encryption key .pem file
# default: null
# required: false
# version:
# description: The API version the client will use.
# required: false
# default: will be supplied by the docker-py module.
# timeout:
# description: The amount of time in seconds to wait on an API response.
# required: false
# default: will be supplied by the docker-py module.
# default_ip:
# description: The IP address to assign to ansilbe_host when the container's SSH port is mappped to 0.0.0.0
# required: false
# default: 1267.0.0.1
# private_ssh_port:
# description: The port containers use for SSH
# required: false
# default: 22
#
# hosts
# If you wish to get inventory from more than one Docker host, define a hosts list.
#
# For the default host and each host in the hosts list define the following attributes:
#
# host:
# description: The URL or Unix socket path used to connect to the Docker API.
# required: yes
#
# tls:
# description: Connect using TLS without verifying the authenticity of the Docker host server.
# default: false
# required: false
#
# tls_verify:
# description: Connect using TLS and verify the authenticity of the Docker host server.
# default: false
# required: false
#
# cert_path:
# description: Path to the client's TLS certificate file.
# default: null
# required: false
#
# cacert_path:
# description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
# default: null
# required: false
#
# key_path:
# description: Path to the client's TLS key file.
# default: null
# required: false
#
# version:
# description: The Docker API version.
# required: false
# default: will be supplied by the docker-py module.
#
# timeout:
# description: The amount of time in seconds to wait on an API response.
# required: false
# default: 60
#
# default_ip:
# description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
# '0.0.0.0'.
# required: false
# default: 127.0.0.1
#
# private_ssh_port:
# description: The port containers use for SSH
# required: false
# default: 22
#defaults:
# host: unix:///var/run/docker.sock
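For reference, a docker.yml of that shape could be loaded like this (values are examples; assumes PyYAML is installed):

import yaml

sample = """
defaults:
  host: unix:///var/run/docker.sock
hosts:
  - host: tcp://192.0.2.10:2376
    tls_verify: true
    cert_path: /certs/cert.pem
    key_path: /certs/key.pem
    cacert_path: /certs/ca.pem
"""
config = yaml.safe_load(sample)
print(config['defaults']['host'])                    # unix:///var/run/docker.sock
print([h['host'] for h in config.get('hosts', [])])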

View file

@ -45,3 +45,11 @@ gce_service_account_email_address =
gce_service_account_pem_file_path =
gce_project_id =
[inventory]
# The 'inventory_ip_type' parameter specifies whether 'ansible_ssh_host' should
# contain the instance internal or external address. Values may be either
# 'internal' or 'external'. If 'external' is specified but no external instance
# address exists, the internal address will be used.
# The INVENTORY_IP_TYPE environment variable will override this value.
inventory_ip_type =
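The precedence described above, as a small Python sketch (an empty ini value means no preference):

import os

ini_value = ""                                   # what the commented-out setting yields
ip_type = os.environ.get('INVENTORY_IP_TYPE', ini_value)
if ip_type:
    ip_type = ip_type.lower()
use_internal = (ip_type == 'internal')           # anything else falls back to external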

View file

@ -69,7 +69,8 @@ Examples:
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Version: 0.0.1
Contributors: Matt Hite <mhite@hotmail.com>
Version: 0.0.2
'''
__requires__ = ['pycrypto>=2.6']
@ -83,7 +84,7 @@ except ImportError:
pass
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
USER_AGENT_VERSION="v1"
USER_AGENT_VERSION="v2"
import sys
import os
@ -111,7 +112,11 @@ class GceInventory(object):
def __init__(self):
# Read settings and parse CLI arguments
self.parse_cli_args()
self.config = self.get_config()
self.driver = self.get_gce_driver()
self.ip_type = self.get_inventory_options()
if self.ip_type:
self.ip_type = self.ip_type.lower()
# Just display data for specific host
if self.args.host:
@ -125,9 +130,13 @@ class GceInventory(object):
pretty=self.args.pretty))
sys.exit(0)
def get_gce_driver(self):
"""Determine the GCE authorization settings and return a
libcloud driver.
def get_config(self):
"""
Populates a SafeConfigParser object with defaults and
attempts to read an .ini-style configuration from the filename
specified in GCE_INI_PATH. If the environment variable is
not present, the filename defaults to gce.ini in the current
working directory.
"""
gce_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
@ -142,14 +151,32 @@ class GceInventory(object):
'gce_service_account_pem_file_path': '',
'gce_project_id': '',
'libcloud_secrets': '',
'inventory_ip_type': '',
})
if 'gce' not in config.sections():
config.add_section('gce')
config.read(gce_ini_path)
if 'inventory' not in config.sections():
config.add_section('inventory')
config.read(gce_ini_path)
return config
def get_inventory_options(self):
"""Determine inventory options. Environment variables always
take precedence over configuration files."""
ip_type = self.config.get('inventory', 'inventory_ip_type')
# If the appropriate environment variables are set, they override
# other configuration
ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
return ip_type
def get_gce_driver(self):
"""Determine the GCE authorization settings and return a
libcloud driver.
"""
# Attempt to get GCE params from a configuration file, if one
# exists.
secrets_path = config.get('gce', 'libcloud_secrets')
secrets_path = self.config.get('gce', 'libcloud_secrets')
secrets_found = False
try:
import secrets
@ -175,10 +202,10 @@ class GceInventory(object):
pass
if not secrets_found:
args = [
config.get('gce','gce_service_account_email_address'),
config.get('gce','gce_service_account_pem_file_path')
self.config.get('gce','gce_service_account_email_address'),
self.config.get('gce','gce_service_account_pem_file_path')
]
kwargs = {'project': config.get('gce', 'gce_project_id')}
kwargs = {'project': self.config.get('gce', 'gce_project_id')}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
@ -218,6 +245,12 @@ class GceInventory(object):
md[entry['key']] = entry['value']
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
# default to external IP unless user has specified they prefer internal
if self.ip_type == 'internal':
ssh_host = inst.private_ips[0]
else:
ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
return {
'gce_uuid': inst.uuid,
'gce_id': inst.id,
@ -233,7 +266,7 @@ class GceInventory(object):
'gce_metadata': md,
'gce_network': net,
# Hosts don't have a public name, so we add an IP
'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
'ansible_ssh_host': ssh_host
}
def get_instance(self, instance_name):
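Stubbed out, the address selection added above behaves like this:

ip_type = 'internal'                              # from gce.ini or INVENTORY_IP_TYPE
public_ips, private_ips = ['203.0.113.7'], ['10.240.0.2']
if ip_type == 'internal':
    ssh_host = private_ips[0]
else:
    ssh_host = public_ips[0] if public_ips else private_ips[0]
print(ssh_host)                                   # 10.240.0.2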

View file

@ -63,7 +63,7 @@ CREATE
The *create* sub-command is used to initialize a new encrypted file.
After providing a password, the tool will launch whatever editor you have defined
with $EDITOR, and defaults to vim. Once you are done with the editor session, the
with $EDITOR, and defaults to vi. Once you are done with the editor session, the
file will be saved as encrypted data.
The default cipher is AES (which is shared-secret based).

View file

@ -42,7 +42,7 @@ The 'ARGUMENTS' to pass to the module.
Use privilege escalation (specific one depends on become_method),
this does not imply prompting for passwords.
*K*, *--ask-become-pass*::
*-K*, *--ask-become-pass*::
Ask for privilege escalation password.

View file

@ -251,7 +251,7 @@ Tower Support Questions
Ansible `Tower <http://ansible.com/tower>`_ is a UI, Server, and REST endpoint for Ansible, produced by Ansible, Inc.
If you have a question about tower, email `support@ansible.com <mailto:support@ansible.com>`_ rather than using the IRC
If you have a question about Tower, visit `support.ansible.com <https://support.ansible.com/>`_ rather than using the IRC
channel or the general project mailing list.
IRC Channel

View file

@ -8,12 +8,12 @@ Requirements
------------
Using the Azure Resource Manager modules requires having `Azure Python SDK <https://github.com/Azure/azure-sdk-for-python>`_
installed on the host running Ansible. You will need to have >= v2.0.0RC2 installed. The simplest way to install the
installed on the host running Ansible. You will need to have == v2.0.0RC5 installed. The simplest way to install the
SDK is via pip:
.. code-block:: bash
$ pip install azure==2.0.0rc2
$ pip install "azure==2.0.0rc5"
Authenticating with Azure
@ -320,6 +320,10 @@ Select hosts for specific tag key by assigning a comma separated list of tag key
* AZURE_TAGS=key1,key2,key3
Select hosts for specific locations by assigning a comma separated list of locations to:
* AZURE_LOCATIONS=eastus,eastus2,westus
Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
* AZURE_TAGS=key1:value1,key2:value2
@ -340,6 +344,9 @@ file will contain the following:
# Control which tags are included. Set tags to a comma separated list of keys or key:value pairs
#tags=
# Control which locations are included. Set locations to a comma separated list of locations.
#locations=
# Include powerstate. If you don't need powerstate information, turning it off improves runtime performance.
# Valid values: yes, no, true, false, True, False, 0, 1.
include_powerstate=yes

View file

@ -0,0 +1,317 @@
Getting Started with Docker
===========================
Ansible offers the following modules for orchestrating Docker containers:
docker_service
Use your existing Docker compose files to orchestrate containers on a single Docker daemon or on
Swarm. Supports compose versions 1 and 2.
docker_container
Manages the container lifecycle by providing the ability to create, update, stop, start and destroy a
container.
docker_image
Provides full control over images, including: build, pull, push, tag and remove.
docker_image_facts
Inspects one or more images in the Docker host's image cache, providing the information as facts for making
decisions or assertions in a playbook.
docker_login
Authenticates with Docker Hub or any Docker registry and updates the Docker Engine config file, which
in turn provides password-free pushing and pulling of images to and from the registry.
docker (dynamic inventory)
Dynamically builds an inventory of all the available containers from a set of one or more Docker hosts.
Ansible 2.1.0 includes major updates to the Docker modules, marking the start of a project to create a complete and
integrated set of tools for orchestrating containers. In addition to the above modules, we are also working on the
following:
Still using Dockerfile to build images? Check out `ansible-container <https://github.com/ansible/ansible-container>`_,
and start building images from your Ansible playbooks.
Use the *shipit* command in `ansible-container <https://github.com/ansible/ansible-container>`_
to launch your docker-compose file on `OpenShift <https://www.openshift.org/>`_. Go from an app on your laptop to a fully
scalable app in the cloud in just a few moments.
There's more planned. See the latest ideas and thinking at the `Ansible proposal repo <https://github.com/ansible/proposals/tree/master/docker>`_.
Requirements
------------
Using the docker modules requires having `docker-py <https://docker-py.readthedocs.org/en/stable/>`_
installed on the host running Ansible. You will need to have >= 1.7.0 installed.
.. code-block:: bash
$ pip install 'docker-py>=1.7.0'
The docker_service module also requires `docker-compose <https://github.com/docker/compose>`_
.. code-block:: bash
$ pip install 'docker-compose>=1.7.0'
Connecting to the Docker API
----------------------------
You can connect to a local or remote API using parameters passed to each task or by setting environment variables.
The order of precedence is command line parameters and then environment variables. If neither a command line
option nor an environment variable is found, a default value will be used. The default values are provided under
`Parameters`_.
Parameters
..........
Control how modules connect to the Docker API by passing the following parameters:
docker_host
The URL or Unix socket path used to connect to the Docker API. Defaults to ``unix://var/run/docker.sock``.
To connect to a remote host, provide the TCP connection string. For example: ``tcp://192.168.99.100:2376``. If
TLS is used to encrypt the connection to the API, then the module will automatically replace 'tcp' in the
connection URL with 'https'.
api_version
The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
by docker-py.
timeout
The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.
tls
Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
Defaults to False.
tls_verify
Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
Default is False.
cacert_path
Use a CA certificate when performing server verification by providing the path to a CA certificate file.
cert_path
Path to the client's TLS certificate file.
key_path
Path to the client's TLS key file.
tls_hostname
When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
to 'localhost'.
ssl_version
Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing
was 1.0
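For reference, here is a hedged sketch of how these parameters map onto a docker-py 1.x client; the paths and address are placeholders, not defaults:

.. code-block:: python

    from docker import Client
    from docker.tls import TLSConfig

    # assumes docker-py 1.x; all values below are examples only
    tls_config = TLSConfig(client_cert=('/certs/cert.pem', '/certs/key.pem'),
                           ca_cert='/certs/ca.pem',
                           verify=True,
                           assert_hostname='myhost')
    client = Client(base_url='https://192.168.99.100:2376',
                    version='auto', timeout=60, tls=tls_config)
    print(client.version())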
Environment Variables
.....................
Control how the modules connect to the Docker API by setting the following variables in the environment of the host
running Ansible:
DOCKER_HOST
The URL or Unix socket path used to connect to the Docker API.
DOCKER_API_VERSION
The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
by docker-py.
DOCKER_TIMEOUT
The maximum amount of time in seconds to wait on a response from the API.
DOCKER_CERT_PATH
Path to the directory containing the client certificate, client key and CA certificate.
DOCKER_SSL_VERSION
Provide a valid SSL version number.
DOCKER_TLS
Secure the connection to the API by using TLS without verifying the authenticity of the Docker Host.
DOCKER_TLS_VERIFY
Secure the connection to the API by using TLS and verify the authenticity of the Docker Host.
Dynamic Inventory Script
------------------------
The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic
because the inventory is generated at run-time rather than being read from a static file. The script generates the
inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the
script contacts can be defined using environment variables or a configuration file.
Groups
......
The script will create the following host groups:
- container id
- container name
- container short id
- image_name (image_<image name>)
- docker_host
- running
- stopped
Examples
........
You can run the script interactively from the command line or pass it as the inventory to a playbook. Here are a few
examples to get you started:
.. code-block:: bash
# Connect to the Docker API on localhost port 4243 and format the JSON output
DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty
# Any container's ssh port exposed on 0.0.0.0 will be mapped to
# another IP address (where Ansible will attempt to connect via SSH)
DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty
# Run as input to a playbook:
ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml
# Simple playbook to invoke with the above example:
- name: Test docker_inventory
hosts: all
connection: local
gather_facts: no
tasks:
- debug: msg="Container - {{ inventory_hostname }}"
Configuration
.............
You can control the behavior of the inventory script by defining environment variables, or
creating a docker.yml file (sample provided in ansible/contrib/inventory). The order of precedence is the docker.yml
file and then environment variables.
Environment Variables
;;;;;;;;;;;;;;;;;;;;;;
To connect to a single Docker API the following variables can be defined in the environment to control the connection
options. These are the same environment variables used by the Docker modules.
DOCKER_HOST
The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock.
DOCKER_API_VERSION:
The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
by docker-py.
DOCKER_TIMEOUT:
The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.
DOCKER_TLS:
Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
Defaults to False.
DOCKER_TLS_VERIFY:
Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
Default is False
DOCKER_TLS_HOSTNAME:
When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
to localhost.
DOCKER_CERT_PATH:
Path to the directory containing the client certificate, client key and CA certificate.
DOCKER_SSL_VERSION:
Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing
was 1.0
In addition to the connection variables, there are a couple of variables used to control the execution and output of the
script:
DOCKER_CONFIG_FILE
Path to the configuration file. Defaults to ./docker.yml.
DOCKER_PRIVATE_SSH_PORT:
The private port (container port) on which SSH is listening for connections. Defaults to 22.
DOCKER_DEFAULT_IP:
The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
Configuration File
;;;;;;;;;;;;;;;;;;
Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory.
The default name of the file is derived from the name of the inventory script. By default the script will look for
the basename of the script (i.e. docker) with an extension of '.yml'.
You can also override the default file name by defining DOCKER_CONFIG_FILE in the environment.
Here's what you can define in docker.yml:
defaults
Defines a default connection. Defaults will be taken from this and applied to any values not provided
for a host defined in the hosts list.
hosts
If you wish to get inventory from more than one Docker host, define a hosts list.
For the default host and each host in the hosts list define the following attributes:
.. code-block:: yaml
host:
description: The URL or Unix socket path used to connect to the Docker API.
required: yes
tls:
description: Connect using TLS without verifying the authenticity of the Docker host server.
default: false
required: false
tls_verify:
description: Connect using TLS and verify the authenticity of the Docker host server.
default: false
required: false
cert_path:
description: Path to the client's TLS certificate file.
default: null
required: false
cacert_path:
description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
default: null
required: false
key_path:
description: Path to the client's TLS key file.
default: null
required: false
version:
description: The Docker API version.
required: false
default: will be supplied by the docker-py module.
timeout:
description: The amount of time in seconds to wait on an API response.
required: false
default: 60
default_ip:
description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
'0.0.0.0'.
required: false
default: 127.0.0.1
private_ssh_port:
description: The port containers use for SSH
required: false
default: 22

View file

@ -11,7 +11,7 @@ Introduction
Ansible contains modules for managing Google Compute Engine resources, including creating instances, controlling network access, working with persistent disks, and managing
load balancers. Additionally, there is an inventory plugin that can automatically suck down all of your GCE instances into Ansible dynamic inventory, and create groups by tag and other properties.
The GCE modules all require the apache-libcloud module, which you can install from pip:
The GCE modules all require the apache-libcloud module which you can install from pip:
.. code-block:: bash
@ -22,16 +22,19 @@ The GCE modules all require the apache-libcloud module, which you can install fr
Credentials
-----------
To work with the GCE modules, you'll first need to get some credentials. You can create new one from the `console <https://console.developers.google.com/>`_ by going to the "APIs and Auth" section and choosing to create a new client ID for a service account. Once you've created a new client ID and downloaded (you must click **Generate new P12 Key**) the generated private key (in the `pkcs12 format <http://en.wikipedia.org/wiki/PKCS_12>`_), you'll need to convert the key by running the following command:
To work with the GCE modules, you'll first need to get some credentials in the
JSON format:
.. code-block:: bash
1. `Create a Service Account <https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount>`_
2. `Download JSON credentials <https://support.google.com/cloud/answer/6158849?hl=en&ref_topic=6262490#serviceaccounts>`_
$ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret -nodes -nocerts | openssl rsa -out pkey.pem
There are three different ways to provide credentials to Ansible so that it can talk with Google Cloud for provisioning and configuration actions:
There are two different ways to provide credentials to Ansible so that it can talk with Google Cloud for provisioning and configuration actions:
.. note:: If you would like to use JSON credentials you must have libcloud >= 0.17.0
* by providing to the modules directly
* by populating a ``secrets.py`` file
* by setting environment variables
Calling Modules By Passing Credentials
``````````````````````````````````````
@ -39,7 +42,7 @@ Calling Modules By Passing Credentials
For the GCE modules you can specify the credentials as arguments:
* ``service_account_email``: email associated with the project
* ``pem_file``: path to the pem file
* ``credentials_file``: path to the JSON credentials file
* ``project_id``: id of the project
For example, to create a new instance using the cloud module, you can use the following configuration:
@ -48,12 +51,12 @@ For example, to create a new instance using the cloud module, you can use the fo
- name: Create instance(s)
hosts: localhost
connection: local
gather_facts: no
vars:
service_account_email: unique-id@developer.gserviceaccount.com
pem_file: /path/to/project.pem
credentials_file: /path/to/project.json
project_id: project-id
machine_type: n1-standard-1
image: debian-7
@ -61,28 +64,50 @@ For example, to create a new instance using the cloud module, you can use the fo
tasks:
- name: Launch instances
gce:
instance_names: dev
machine_type: "{{ machine_type }}"
image: "{{ image }}"
service_account_email: "{{ service_account_email }}"
pem_file: "{{ pem_file }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
Calling Modules with secrets.py
```````````````````````````````
When running Ansible inside a GCE VM you can use the service account credentials from the local metadata server by
setting both ``service_account_email`` and ``credentials_file`` to a blank string.
Configuring Modules with secrets.py
```````````````````````````````````
Create a file ``secrets.py`` looking like following, and put it in some folder which is in your ``$PYTHONPATH``:
.. code-block:: python
GCE_PARAMS = ('i...@project.googleusercontent.com', '/path/to/project.pem')
GCE_PARAMS = ('i...@project.googleusercontent.com', '/path/to/project.json')
GCE_KEYWORD_PARAMS = {'project': 'project_id'}
Be sure to enter the email address of the service account you created, not the one from your main account.
Now the modules can be used as above, but the account information can be omitted.
If you are running Ansible from inside a GCE VM with an authorized service account, you can set the email address and
credentials path as follows so that they get picked up automatically:
.. code-block:: python
GCE_PARAMS = ('', '')
GCE_KEYWORD_PARAMS = {'project': 'project_id'}
Configuring Modules with Environment Variables
``````````````````````````````````````````````
Set the following environment variables before running Ansible in order to configure your credentials:
.. code-block:: bash
GCE_EMAIL
GCE_PROJECT
GCE_CREDENTIALS_FILE_PATH
GCE Dynamic Inventory
---------------------
@ -171,7 +196,7 @@ A playbook would looks like this:
machine_type: n1-standard-1 # default
image: debian-7
service_account_email: unique-id@developer.gserviceaccount.com
pem_file: /path/to/project.pem
credentials_file: /path/to/project.json
project_id: project-id
tasks:
@ -181,7 +206,7 @@ A playbook would looks like this:
machine_type: "{{ machine_type }}"
image: "{{ image }}"
service_account_email: "{{ service_account_email }}"
pem_file: "{{ pem_file }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
tags: webserver
register: gce
@ -224,7 +249,7 @@ a basic example of what is possible::
machine_type: n1-standard-1 # default
image: debian-7
service_account_email: unique-id@developer.gserviceaccount.com
pem_file: /path/to/project.pem
credentials_file: /path/to/project.json
project_id: project-id
roles:
@ -238,13 +263,12 @@ a basic example of what is possible::
args:
fwname: "all-http"
name: "default"
allowed: "tcp:80"
state: "present"
service_account_email: "{{ service_account_email }}"
pem_file: "{{ pem_file }}"
allowed: "tcp:80"
state: "present"
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
By pointing your browser to the IP of the server, you should see a page welcoming you.
Upgrades to this documentation are welcome, hit the github link at the top right of this page if you would like to make additions!

View file

@ -1,7 +1,7 @@
Detailed Guides
```````````````
This section is new and evolving. The idea here is explore particular use cases in greater depth and provide a more "top down" explanation of some basic features.
This section is new and evolving. The idea here is to explore particular use cases in greater depth and provide a more "top down" explanation of some basic features.
.. toctree::
:maxdepth: 1
@ -13,5 +13,6 @@ This section is new and evolving. The idea here is explore particular use cases
guide_cloudstack
guide_vagrant
guide_rolling_upgrade
guide_docker
Pending topics may include: Docker, Jenkins, Google Compute Engine, Linode/DigitalOcean, Continuous Deployment, and more.

View file

@ -74,6 +74,34 @@ different locations::
Most users will not need to use this feature. See :doc:`developing_plugins` for more details.
.. _allow_unsafe_lookups:
allow_unsafe_lookups
====================
.. versionadded:: 2.2.3, 2.3.1
When enabled, this option allows lookup plugins (whether used in variables as `{{lookup('foo')}}` or in a loop as `with_foo`) to return data that is **not** marked "unsafe". By default, such data is marked as unsafe to prevent the templating engine from evaluating any jinja2 templating language, as this could represent a security risk.
This option is provided to allow for backwards compatibility; however, users should first consider adding `allow_unsafe=True` to any lookups that are expected to return data which may later be run through the templating engine. For example::
{{lookup('pipe', '/path/to/some/command', allow_unsafe=True)}}
.. _allow_world_readable_tmpfiles:
allow_world_readable_tmpfiles
=============================
.. versionadded:: 2.1
This makes the temporary files created on the remote machine world-readable, and will issue a warning instead of failing the task.
It is useful when becoming an unprivileged user::
allow_world_readable_tmpfiles=True
.. _ansible_managed:
ansible_managed
@ -149,7 +177,7 @@ Callbacks are pieces of code in ansible that get called on specific events, perm
This is a developer-centric feature that allows low-level extensions around Ansible to be loaded from
different locations::
callback_plugins = ~/.ansible/plugins/callback_plugins/:/usr/share/ansible_plugins/callback_plugins
callback_plugins = ~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback
Most users will not need to use this feature. See :doc:`developing_plugins` for more details

View file

@ -801,11 +801,11 @@ In 2.x, we have made the order of precedence more specific (with the last listed
* playbook group_vars
* playbook host_vars
* host facts
* registered vars
* set_facts
* play vars
* play vars_prompt
* play vars_files
* registered vars
* set_facts
* role and include vars
* block vars (only for tasks in block)
* task vars (only for the task)

View file

@ -29,7 +29,7 @@ To create a new encrypted data file, run the following command::
First you will be prompted for a password. The password used with vault currently must be the same for all files you wish to use together at the same time.
After providing a password, the tool will launch whatever editor you have defined with $EDITOR, and defaults to vim. Once you are done with the editor session, the file will be saved as encrypted data.
After providing a password, the tool will launch whatever editor you have defined with $EDITOR, and defaults to vi (before 2.1 the default was vim). Once you are done with the editor session, the file will be saved as encrypted data.
The default cipher is AES (which is shared-secret based).

View file

@ -252,6 +252,12 @@
# set to 0 for unlimited (RAM may suffer!).
#max_diff_size = 1048576
# When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as
# a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain
# jinja2 templating language which will be run through the templating engine.
# ENABLING THIS COULD BE A SECURITY RISK
#allow_unsafe_lookups = False
[privilege_escalation]
#become=True
#become_method=sudo
@ -331,7 +337,7 @@
#libvirt_lxc_noseclabel = yes
[colors]
#higlight = white
#highlight = white
#verbose = blue
#warn = bright purple
#error = red

View file

@ -8,6 +8,15 @@ set PREFIX_PYTHONPATH $ANSIBLE_HOME/lib
set PREFIX_PATH $ANSIBLE_HOME/bin
set PREFIX_MANPATH $ANSIBLE_HOME/docs/man
# set quiet flag
if set -q argv
switch $argv
case '-q' '--quiet'
set QUIET "true"
case '*'
end
end
# Set PYTHONPATH
if not set -q PYTHONPATH
set -gx PYTHONPATH $PREFIX_PYTHONPATH
@ -15,7 +24,9 @@ else
switch PYTHONPATH
case "$PREFIX_PYTHONPATH*"
case "*"
echo "Appending PYTHONPATH"
if not [ $QUIET ]
echo "Appending PYTHONPATH"
end
set -gx PYTHONPATH "$PREFIX_PYTHONPATH:$PYTHONPATH"
end
end
@ -38,7 +49,11 @@ set -gx ANSIBLE_LIBRARY $ANSIBLE_HOME/library
# Generate egg_info so that pkg_resources works
pushd $ANSIBLE_HOME
python setup.py egg_info
if [ $QUIET ]
python setup.py -q egg_info
else
python setup.py egg_info
end
if test -e $PREFIX_PYTHONPATH/ansible*.egg-info
rm -r $PREFIX_PYTHONPATH/ansible*.egg-info
end
@ -47,22 +62,19 @@ find . -type f -name "*.pyc" -delete
popd
if set -q argv
switch $argv
case '-q' '--quiet'
case '*'
echo ""
echo "Setting up Ansible to run out of checkout..."
echo ""
echo "PATH=$PATH"
echo "PYTHONPATH=$PYTHONPATH"
echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY"
echo "MANPATH=$MANPATH"
echo ""
echo "Remember, you may wish to specify your host file with -i"
echo ""
echo "Done!"
echo ""
end
if not [ $QUIET ]
echo ""
echo "Setting up Ansible to run out of checkout..."
echo ""
echo "PATH=$PATH"
echo "PYTHONPATH=$PYTHONPATH"
echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY"
echo "MANPATH=$MANPATH"
echo ""
echo "Remember, you may wish to specify your host file with -i"
echo ""
echo "Done!"
echo ""
end
set -e QUIET

View file

@ -175,7 +175,7 @@ def ziploader_setup(modfile, modname):
print("* ziploader module detected; extracted module source to: %s" % debug_dir)
return modfile, argsfile
def runtest(modstyle, modfile, argspath, modname, module_style):
def runtest(modfile, argspath, modname, module_style):
"""Test run a module, piping it's output for reporting."""
if module_style == 'ziploader':
modfile, argspath = ziploader_setup(modfile, modname)

View file

@ -19,5 +19,10 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
__version__ = '2.1.0'
__author__ = 'Ansible, Inc.'
# Note: Do not add any code to this file. The ansible module may be
# a namespace package when using Ansible-2.1+ Anything in this file may not be
# available if one of the other packages in the namespace is loaded first.
#
# This is for backwards compat. Code should be ported to get these from
# ansible.release instead of from here.
from ansible.release import __version__, __author__
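For reference, a minimal sketch of the pkgutil-style namespace __init__.py the comment above alludes to ('mypkg' is a hypothetical package split across several sys.path entries; the scheme only works if every copy of the file cooperates like this):

    # mypkg/__init__.py -- sketch, not the actual Ansible file
    from pkgutil import extend_path
    __path__ = extend_path(__path__, __name__)  # merge every sys.path copy of mypkg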

View file

@ -30,7 +30,7 @@ import getpass
import signal
import subprocess
from ansible import __version__
from ansible.release import __version__
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.utils.unicode import to_bytes, to_unicode
@ -90,7 +90,10 @@ class CLI(object):
break
if not self.action:
raise AnsibleOptionsError("Missing required action")
# no action is required if we are only showing the version or help
tmp_options, tmp_args = self.parser.parse_args()
if not(hasattr(tmp_options, 'help') and tmp_options.help) or (hasattr(tmp_options, 'version') and tmp_options.version):
raise AnsibleOptionsError("Missing required action")
def execute(self):
"""
@ -476,7 +479,7 @@ class CLI(object):
display.display(text)
else:
self.pager_pipe(text, os.environ['PAGER'])
elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0:
elif subprocess.call('(less --version) &> /dev/null', shell = True) == 0:
self.pager_pipe(text, 'less')
else:
display.display(text)
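A rough sketch of what a pager_pipe helper like the one called above presumably does (simplified; the real method may set additional environment such as LESS options):

    import subprocess

    def pager_pipe(text, cmd='less'):
        # feed the text to the pager over stdin and wait for the user to quit it
        p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
        p.communicate(input=text.encode('utf-8'))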

View file

@ -130,7 +130,7 @@ class AdHocCLI(CLI):
variable_manager.set_inventory(inventory)
no_hosts = False
if len(inventory.list_hosts(pattern)) == 0:
if len(inventory.list_hosts()) == 0:
# Empty inventory
display.warning("provided hosts list is empty, only localhost is available")
no_hosts = True
@ -139,7 +139,7 @@ class AdHocCLI(CLI):
hosts = inventory.list_hosts(pattern)
if len(hosts) == 0 and no_hosts is False:
# Invalid limit
raise AnsibleError("Specified --limit does not match any hosts")
raise AnsibleError("Specified hosts and/or --limit does not match any hosts")
if self.options.listhosts:
display.display(' hosts (%d):' % len(hosts))

View file

@ -38,7 +38,7 @@ import sys
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.errors import AnsibleError
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.inventory import Inventory
@ -301,7 +301,7 @@ class ConsoleCLI(CLI, cmd.Cmd):
def do_become(self, arg):
"""Toggle whether plays run with become"""
if arg:
self.options.become_user = arg
self.options.become = C.mk_boolean(arg)
display.v("become changed to %s" % self.options.become)
self.set_prompt()
else:
@ -419,13 +419,19 @@ class ConsoleCLI(CLI, cmd.Cmd):
self.inventory = Inventory(loader=self.loader, variable_manager=self.variable_manager, host_list=self.options.inventory)
self.variable_manager.set_inventory(self.inventory)
if len(self.inventory.list_hosts(self.pattern)) == 0:
no_hosts = False
if len(self.inventory.list_hosts()) == 0:
# Empty inventory
no_hosts = True
display.warning("provided hosts list is empty, only localhost is available")
self.inventory.subset(self.options.subset)
hosts = self.inventory.list_hosts(self.pattern)
if len(hosts) == 0 and not no_hosts:
raise AnsibleError("Specified hosts and/or --limit does not match any hosts")
self.groups = self.inventory.list_groups()
self.hosts = [x.name for x in self.inventory.list_hosts(self.pattern)]
self.hosts = [x.name for x in hosts]
# This hack is to work around readline issues on a mac:
# http://stackoverflow.com/a/7116997/541202

View file

@ -51,7 +51,7 @@ class GalaxyCLI(CLI):
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
def __init__(self, args):
self.api = None
self.galaxy = None
@ -65,95 +65,71 @@ class GalaxyCLI(CLI):
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
self.set_action()
# options specific to actions
# common
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
# specific to actions
if self.action == "delete":
self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
elif self.action == "import":
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True,
help='Don\'t wait for import results.')
self.parser.add_option('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_user/github_repo.')
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
self.parser.add_option('--branch', dest='reference', help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
self.parser.add_option('--status', dest='check_status', action='store_true', default=False, help='Check the status of the most recent import request for given github_user/github_repo.')
elif self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
self.parser.add_option('-p', '--init-path', dest='init_path', default="./",
help='The path in which the skeleton role will be created. The default is the current working directory.')
self.parser.add_option(
'--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
self.parser.add_option('-p', '--init-path', dest='init_path', default="./", help='The path in which the skeleton role will be created. The default is the current working directory.')
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file',
help='A file containing a list of roles to be imported')
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
elif self.action == "remove":
self.parser.set_usage("usage: %prog remove role1 role2 ...")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
elif self.action == "login":
self.parser.set_usage("usage: %prog login [options]")
self.parser.add_option('--github-token', dest='token', default=None,
help='Identify with github token rather than username and password.')
self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
elif self.action == "search":
self.parser.add_option('--platforms', dest='platforms',
help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='tags',
help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author',
help='GitHub username')
self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]")
self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='tags', help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author', help='GitHub username')
elif self.action == "setup":
self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
self.parser.add_option('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False,
help='List all of your integrations.')
self.parser.add_option('--remove', dest='remove_id', default=None, help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
# options that apply to more than one action
if self.action in ['init', 'info']:
self.parser.add_option( '--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")
if not self.action in ("delete","import","init","login","setup"):
# NOTE: while the option type=str, the default is a list, and the
# callback will set the value to a list.
self.parser.add_option('-p', '--roles-path', dest='roles_path',
action="callback", callback=CLI.expand_paths,
type=str, default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. '
'The default is the roles_path configured in your '
'ansible.cfg file (/etc/ansible/roles if not configured)')
if self.action in ("import","info","init","install","login","search","setup","delete"):
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER,
help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=False,
help='Ignore SSL certificate validation errors.')
self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.expand_paths, type=str, default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured)')
if self.action in ("init","install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role')
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
self.options, self.args = self.parser.parse_args()
display.verbosity = self.options.verbosity
self.galaxy = Galaxy(self.options)
return True
def run(self):
super(GalaxyCLI, self).run()
# if not offline, connect to the galaxy api
if self.action in ("import","info","install","search","login","setup","delete") or \
(self.action == 'init' and not self.options.offline):
self.api = GalaxyAPI(self.galaxy)
self.api = GalaxyAPI(self.galaxy)
self.execute()
def exit_without_ignore(self, rc=1):
@ -242,7 +218,7 @@ class GalaxyCLI(CLI):
# platforms included (but commented out), the galaxy_tags
# list, and the dependencies section
platforms = []
if not offline and self.api:
if not offline:
platforms = self.api.get_list("platforms") or []
# group the list of platforms from the api based
@ -315,7 +291,7 @@ class GalaxyCLI(CLI):
role_info.update(install_info)
remote_data = False
if self.api:
if not self.options.offline:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
@ -489,22 +465,23 @@ class GalaxyCLI(CLI):
else:
# show all valid roles in the roles_path directory
roles_path = self.get_opt('roles_path')
roles_path = os.path.expanduser(roles_path)
if not os.path.exists(roles_path):
raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path)
elif not os.path.isdir(roles_path):
raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path)
path_files = os.listdir(roles_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (path_file, version))
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % role_path)
elif not os.path.isdir(role_path):
raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % role_path)
path_files = os.listdir(role_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (path_file, version))
return 0
def execute_search(self):

View file

@ -158,6 +158,12 @@ class PlaybookCLI(CLI):
display.display('\nplaybook: %s' % p['playbook'])
for idx, play in enumerate(p['plays']):
if play._included_path is not None:
loader.set_basedir(play._included_path)
else:
pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
loader.set_basedir(pb_dir)
msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
mytags = set(play.tags)
msg += '\tTAGS: [%s]' % (','.join(mytags))

View file

@ -28,6 +28,7 @@ from ansible.compat.six.moves import configparser
from ansible.parsing.quoting import unquote
from ansible.errors import AnsibleOptionsError
from ansible.utils.path import makedirs_safe
# copied from utils, avoid circular reference fun :)
def mk_boolean(value):
@ -39,16 +40,22 @@ def mk_boolean(value):
else:
return False
def shell_expand(path):
def shell_expand(path, expand_relative_paths=False):
'''
shell_expand is needed as os.path.expanduser does not work
when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE
'''
if path:
path = os.path.expanduser(os.path.expandvars(path))
if expand_relative_paths and not path.startswith('/'):
# paths are always 'relative' to the config?
if 'CONFIG_FILE' in globals():
CFGDIR = os.path.dirname(CONFIG_FILE)
path = os.path.join(CFGDIR, path)
path = os.path.abspath(path)
return path
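Illustrative behaviour of the new expand_relative_paths flag, restated as a standalone sketch (cfg_dir stands in for the directory of CONFIG_FILE; the paths are hypothetical):

    import os

    def expand(path, cfg_dir=None):
        # simplified restatement of shell_expand above
        path = os.path.expanduser(os.path.expandvars(path))
        if cfg_dir and not path.startswith('/'):
            path = os.path.abspath(os.path.join(cfg_dir, path))
        return path

    print(expand('~/roles'))                        # e.g. /home/user/roles
    print(expand('roles', cfg_dir='/etc/ansible'))  # /etc/ansible/roles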
def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False, isnone=False, ispath=False, ispathlist=False, istmppath=False):
def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False, isnone=False, ispath=False, ispathlist=False, istmppath=False, expand_relative_paths=False):
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
@ -69,11 +76,13 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False,
elif istmppath:
value = shell_expand(value)
if not os.path.exists(value):
os.makedirs(value, 0o700)
value = tempfile.mkdtemp(prefix='ansible-local-tmp', dir=value)
makedirs_safe(value, 0o700)
prefix = 'ansible-local-%s' % os.getpid()
value = tempfile.mkdtemp(prefix=prefix, dir=value)
elif ispathlist:
if isinstance(value, string_types):
value = [shell_expand(x) for x in value.split(os.pathsep)]
value = [shell_expand(x, expand_relative_paths=expand_relative_paths) \
for x in value.split(os.pathsep)]
elif isinstance(value, string_types):
value = unquote(value)
return value
@ -140,7 +149,7 @@ DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, None)
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True)
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, ispathlist=True)
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispathlist=True)
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispathlist=True, expand_relative_paths=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
DEFAULT_LOCAL_TMP = get_config(p, DEFAULTS, 'local_tmp', 'ANSIBLE_LOCAL_TEMP', '$HOME/.ansible/tmp', istmppath=True)
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
@ -173,6 +182,7 @@ DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIB
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True)
DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, integer=True)
DEFAULT_ALLOW_UNSAFE_LOOKUPS = get_config(p, DEFAULTS, 'allow_unsafe_lookups', None, False, boolean=True)
# static includes
DEFAULT_TASK_INCLUDES_STATIC = get_config(p, DEFAULTS, 'task_includes_static', 'ANSIBLE_TASK_INCLUDES_STATIC', False, boolean=True)
@ -302,7 +312,7 @@ COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECAT
COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan')
COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
COLOR_CHANGED = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_CHANGED', 'yellow')
COLOR_CHANGED = get_config(p, 'colors', 'changed', 'ANSIBLE_COLOR_CHANGED', 'yellow')
COLOR_DIFF_ADD = get_config(p, 'colors', 'diff_add', 'ANSIBLE_COLOR_DIFF_ADD', 'green')
COLOR_DIFF_REMOVE = get_config(p, 'colors', 'diff_remove', 'ANSIBLE_COLOR_DIFF_REMOVE', 'red')
COLOR_DIFF_LINES = get_config(p, 'colors', 'diff_lines', 'ANSIBLE_COLOR_DIFF_LINES', 'cyan')

View file

@ -30,11 +30,14 @@ import zipfile
from io import BytesIO
# from Ansible
from ansible import __version__
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.utils.unicode import to_bytes, to_unicode
from ansible.plugins.strategy import action_write_locks
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.plugins import strategy
try:
from __main__ import display
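The comment above describes standard Python import semantics; a self-contained demonstration (toy module, not Ansible code):

    import types

    mod = types.ModuleType('mod')
    mod.locks = {'old': True}

    snapshot = mod.locks        # like 'from mod import locks': copies the current binding
    mod.locks = {'new': True}   # the module attribute is rebound later

    print(snapshot)             # {'old': True} -- stale copy
    print(mod.locks)            # {'new': True} -- live lookup, like strategy.action_write_locks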
@ -118,7 +121,7 @@ def invoke_module(module, modlib_path, json_params):
else:
os.environ['PYTHONPATH'] = modlib_path
p = subprocess.Popen(['%(interpreter)s', module], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
p = subprocess.Popen([%(interpreter)s, module], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate(json_params)
if not isinstance(stderr, (bytes, unicode)):
@ -215,7 +218,7 @@ def debug(command, zipped_mod, json_params):
else:
os.environ['PYTHONPATH'] = basedir
p = subprocess.Popen(['%(interpreter)s', script_path, args_path], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
p = subprocess.Popen([%(interpreter)s, script_path, args_path], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if not isinstance(stderr, (bytes, unicode)):
@ -267,18 +270,33 @@ if __name__ == '__main__':
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
temp_path = tempfile.mkdtemp(prefix='ansible_')
zipped_mod = os.path.join(temp_path, 'ansible_modlib.zip')
modlib = open(zipped_mod, 'wb')
modlib.write(base64.b64decode(ZIPDATA))
modlib.close()
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ZIPLOADER_PARAMS)
else:
z = zipfile.ZipFile(zipped_mod)
z = zipfile.ZipFile(zipped_mod, mode='r')
module = os.path.join(temp_path, 'ansible_module_%(ansible_module)s.py')
f = open(module, 'wb')
f.write(z.read('ansible_module_%(ansible_module)s.py'))
f.close()
# When installed via setuptools (including python setup.py install),
# ansible may be installed with an easy-install.pth file. That file
# may load the system-wide install of ansible rather than the one in
# the module. sitecustomize is the only way to override that setting.
z = zipfile.ZipFile(zipped_mod, mode='a')
# py3: zipped_mod will be text, py2: it's bytes. Need bytes at the end
z = zipfile.ZipFile(zipped_mod, mode='a')
sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% zipped_mod
sitecustomize = sitecustomize.encode('utf-8')
z.writestr('sitecustomize.py', sitecustomize)
z.close()
exitcode = invoke_module(module, zipped_mod, ZIPLOADER_PARAMS)
finally:
try:
@ -372,12 +390,12 @@ def _get_shebang(interpreter, task_vars, args=tuple()):
file rather than trust that we reformatted what they already have
correctly.
"""
interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter)
interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter).strip()
if interpreter_config not in task_vars:
return (None, interpreter)
interpreter = task_vars[interpreter_config]
interpreter = task_vars[interpreter_config].strip()
shebang = u'#!' + interpreter
if args:
@ -385,12 +403,6 @@ def _get_shebang(interpreter, task_vars, args=tuple()):
return (shebang, interpreter)
def _get_facility(task_vars):
facility = C.DEFAULT_SYSLOG_FACILITY
if 'ansible_syslog_facility' in task_vars:
facility = task_vars['ansible_syslog_facility']
return facility
def recursive_finder(name, data, py_module_names, py_module_cache, zf):
"""
Using ModuleDepFinder, make sure we have all of the module_utils files that
@ -529,14 +541,7 @@ def _find_snippet_imports(module_name, module_data, module_path, module_args, ta
py_module_names = set()
if module_substyle == 'python':
# ziploader for new-style python classes
constants = dict(
SELINUX_SPECIAL_FS=C.DEFAULT_SELINUX_SPECIAL_FS,
SYSLOG_FACILITY=_get_facility(task_vars),
)
params = dict(ANSIBLE_MODULE_ARGS=module_args,
ANSIBLE_MODULE_CONSTANTS=constants,
)
params = dict(ANSIBLE_MODULE_ARGS=module_args,)
python_repred_params = to_bytes(repr(json.dumps(params)), errors='strict')
try:
@ -551,19 +556,34 @@ def _find_snippet_imports(module_name, module_data, module_path, module_args, ta
zipdata = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
display.debug('ZIPLOADER: using cached module: %s' % cached_module_filename)
zipdata = open(cached_module_filename, 'rb').read()
# Fool the check later... I think we should just remove the check
py_module_names.add(('basic',))
else:
with action_write_locks[module_name]:
if module_name in strategy.action_write_locks:
display.debug('ZIPLOADER: Using lock for %s' % module_name)
lock = strategy.action_write_locks[module_name]
else:
# If the action plugin directly invokes the module (instead of
# going through a strategy) then we don't have a cross-process
# Lock specifically for this module. Use the "unexpected
# module" lock instead
display.debug('ZIPLOADER: Using generic lock for %s' % module_name)
lock = strategy.action_write_locks[None]
display.debug('ZIPLOADER: Acquiring lock')
with lock:
display.debug('ZIPLOADER: Lock acquired: %s' % id(lock))
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
display.debug('ZIPLOADER: Creating module')
# Create the module zip data
zipoutput = BytesIO()
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
zf.writestr('ansible/__init__.py', b''.join((b"__version__ = '", to_bytes(__version__), b"'\n")))
zf.writestr('ansible/module_utils/__init__.py', b'')
zf.writestr('ansible/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\ntry:\n from ansible.release import __version__,__author__\nexcept ImportError:\n __version__="' + to_bytes(__version__) + b'"\n __author__="' + to_bytes(__author__) + b'"\n')
zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n')
zf.writestr('ansible_module_%s.py' % module_name, module_data)
@ -579,15 +599,19 @@ def _find_snippet_imports(module_name, module_data, module_path, module_args, ta
# Note -- if we have a global function to setup, that would
# be a better place to run this
os.mkdir(lookup_path)
display.debug('ZIPLOADER: Writing module')
with open(cached_module_filename + '-part', 'w') as f:
f.write(zipdata)
# Rename the file into its final position in the cache so
# future users of this module can read it off the
# filesystem instead of constructing from scratch.
display.debug('ZIPLOADER: Renaming module')
os.rename(cached_module_filename + '-part', cached_module_filename)
display.debug('ZIPLOADER: Done creating module')
if zipdata is None:
display.debug('ZIPLOADER: Reading module after lock')
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
@ -601,6 +625,12 @@ def _find_snippet_imports(module_name, module_data, module_path, module_args, ta
shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars)
if shebang is None:
shebang = u'#!/usr/bin/python'
# Enclose the parts of the interpreter in quotes because we're
# substituting it into the template as a Python string
interpreter_parts = interpreter.split(u' ')
interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))
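# illustrative: u'/usr/bin/env python' becomes u"'/usr/bin/env', 'python'", which
# drops cleanly into the Popen([%(interpreter)s, ...]) argument lists in the template above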
output.write(to_bytes(ACTIVE_ZIPLOADER_TEMPLATE % dict(
zipdata=zipdata,
ansible_module=module_name,
@ -656,7 +686,7 @@ def _find_snippet_imports(module_name, module_data, module_path, module_args, ta
# The main event -- substitute the JSON args string into the module
module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)
facility = b'syslog.' + to_bytes(_get_facility(task_vars), errors='strict')
facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='strict')
module_data = module_data.replace(b'syslog.LOG_USER', facility)
return (module_data, module_style, shebang)

View file

@ -80,7 +80,7 @@ class HostState:
ret.append(states[i])
return "|".join(ret)
return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? %s, rescue child state? %s, always child state? %s, did start at task? %s" % (
return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), rescue child state? (%s), always child state? (%s), did start at task? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
@ -216,10 +216,13 @@ class PlayIterator:
self._play.handlers.extend(play.compile_roles_handlers())
def get_host_state(self, host):
try:
return self._host_states[host.name].copy()
except KeyError:
raise AnsibleError("invalid host (%s) specified for playbook iteration" % host)
# Since we're using the PlayIterator to carry forward failed hosts,
# in the event that a previous host was not in the current inventory
# we create a stub state for it now
if host.name not in self._host_states:
self._host_states[host.name] = HostState(blocks=[])
return self._host_states[host.name].copy()
def get_next_task_for_host(self, host, peek=False):
@ -295,10 +298,10 @@ class PlayIterator:
setup_block = self._blocks[0]
if setup_block.has_tasks() and len(setup_block.block) > 0:
task = setup_block.block[0]
if not peek:
# mark the host as having gathered facts, because we're
# returning the setup task to be executed
host.set_gathered_facts(True)
if not peek:
# mark the host as having gathered facts, because we're
# returning the setup task to be executed
host.set_gathered_facts(True)
else:
# This is the second trip through ITERATING_SETUP, so we clear
# the flag and move onto the next block in the list while setting
@ -326,8 +329,7 @@ class PlayIterator:
if self._check_failed_state(state.tasks_child_state):
# failed child state, so clear it and move into the rescue portion
state.tasks_child_state = None
state.fail_state |= self.FAILED_TASKS
state.run_state = self.ITERATING_RESCUE
self._set_failed_state(state)
else:
# get the next task recursively
if task is None or state.tasks_child_state.run_state == self.ITERATING_COMPLETE:
@ -365,8 +367,7 @@ class PlayIterator:
(state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek)
if self._check_failed_state(state.rescue_child_state):
state.rescue_child_state = None
state.fail_state |= self.FAILED_RESCUE
state.run_state = self.ITERATING_ALWAYS
self._set_failed_state(state)
else:
if task is None or state.rescue_child_state.run_state == self.ITERATING_COMPLETE:
state.rescue_child_state = None
@ -396,8 +397,7 @@ class PlayIterator:
(state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek)
if self._check_failed_state(state.always_child_state):
state.always_child_state = None
state.fail_state |= self.FAILED_ALWAYS
state.run_state = self.ITERATING_COMPLETE
self._set_failed_state(state)
else:
if task is None or state.always_child_state.run_state == self.ITERATING_COMPLETE:
state.always_child_state = None
@ -466,18 +466,25 @@ class PlayIterator:
def mark_host_failed(self, host):
s = self.get_host_state(host)
display.debug("marking host %s failed, current state: %s" % (host, s))
s = self._set_failed_state(s)
display.debug("^ failed state is now: %s" % s)
self._host_states[host.name] = s
def get_failed_hosts(self):
return dict((host, True) for (host, state) in iteritems(self._host_states) if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE)
return dict((host, True) for (host, state) in iteritems(self._host_states) if self._check_failed_state(state))
def _check_failed_state(self, state):
if state is None:
return False
elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
return True
elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
return True
elif state.fail_state != self.FAILED_NONE:
if state.run_state == self.ITERATING_RESCUE and state.fail_state&self.FAILED_RESCUE == 0 or \
state.run_state == self.ITERATING_ALWAYS and state.fail_state&self.FAILED_ALWAYS == 0:
if state.run_state == self.ITERATING_RESCUE and state.fail_state&self.FAILED_RESCUE == 0:
return False
elif state.run_state == self.ITERATING_ALWAYS and state.fail_state&self.FAILED_ALWAYS == 0:
return False
else:
return True
@ -487,10 +494,6 @@ class PlayIterator:
return False
else:
return True
elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
return True
elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
return True
return False
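A small sketch of the fail_state bitmask arithmetic used above, assuming the FAILED_* flags are the powers of two defined on PlayIterator:

    FAILED_NONE, FAILED_SETUP, FAILED_TASKS, FAILED_RESCUE, FAILED_ALWAYS = 0, 1, 2, 4, 8

    state = FAILED_TASKS | FAILED_RESCUE       # a task failed, then its rescue failed too
    print(bool(state & FAILED_RESCUE))         # True  -> still failed while ITERATING_RESCUE
    print(bool(FAILED_TASKS & FAILED_RESCUE))  # False -> a failed task alone can be rescued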
def is_failed(self, host):

View file

@ -27,7 +27,8 @@ from ansible import constants as C
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.playbook import Playbook
from ansible.template import Templar
from ansible.utils.unicode import to_unicode
from ansible.utils.path import makedirs_safe
from ansible.utils.unicode import to_unicode, to_str
try:
from __main__ import display
@ -70,7 +71,7 @@ class PlaybookExecutor:
try:
for playbook_path in self._playbooks:
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
self._inventory.set_playbook_basedir(os.path.dirname(playbook_path))
self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))
if self._tqm is None: # we are doing a listing
entry = {'playbook': playbook_path}
@ -128,6 +129,10 @@ class PlaybookExecutor:
else:
self._tqm._unreachable_hosts.update(self._unreachable_hosts)
previously_failed = len(self._tqm._failed_hosts)
previously_unreachable = len(self._tqm._unreachable_hosts)
break_play = False
# we are actually running plays
for batch in self._get_serialized_batches(new_play):
if len(batch) == 0:
@ -140,24 +145,31 @@ class PlaybookExecutor:
# and run it...
result = self._tqm.run(play=play)
# break the play if the result equals the special return code
if result == self._tqm.RUN_FAILED_BREAK_PLAY:
result = self._tqm.RUN_FAILED_HOSTS
break_play = True
# check the number of failures here, to see if they're above the maximum
# failure percentage allowed, or if any errors are fatal. If either of those
# conditions are met, we break out, otherwise we only break out if the entire
# batch failed
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts)
if new_play.max_fail_percentage is not None and \
int((new_play.max_fail_percentage)/100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0):
break
elif len(batch) == failed_hosts_count:
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
(previously_failed + previously_unreachable)
if len(batch) == failed_hosts_count:
break_play = True
break
# clear the failed hosts dictionaries in the TQM for the next batch
# update the previous counts so they don't accumulate incorrectly
# over multiple serial batches
previously_failed += len(self._tqm._failed_hosts) - previously_failed
previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable
# save the unreachable hosts from this batch
self._unreachable_hosts.update(self._tqm._unreachable_hosts)
self._tqm.clear_failed_hosts()
# if the last result wasn't zero or 3 (some hosts were unreachable),
# break out of the serial batch loop
if result not in (0, 3):
if break_play:
break
i = i + 1 # per play
@ -174,8 +186,10 @@ class PlaybookExecutor:
if len(retries) > 0:
if C.RETRY_FILES_SAVE_PATH:
basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH)
elif playbook_path:
basedir = os.path.dirname(os.path.abspath(playbook_path))
else:
basedir = os.path.dirname(playbook_path)
basedir = '~/'
(retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
filename = os.path.join(basedir, "%s.retry" % retry_name)
@ -247,13 +261,13 @@ class PlaybookExecutor:
re-running on ONLY the failed hosts. This may duplicate some variable
information in group_vars/host_vars but that is ok, and expected.
'''
try:
makedirs_safe(os.path.dirname(retry_path))
with open(retry_path, 'w') as fd:
for x in replay_hosts:
fd.write("%s\n" % x)
except Exception as e:
display.error("Could not create retry file '%s'. The error was: %s" % (retry_path, e))
display.warning("Could not create retry file '%s'.\n\t%s" % (retry_path, to_str(e)))
return False
return True

View file

@ -173,7 +173,7 @@ class ResultProcess(multiprocessing.Process):
# if this task is registering facts, do that now
loop_var = 'item'
if result._task.loop_control:
loop_var = result._task.loop_control.get('loop_var') or 'item'
loop_var = result._task.loop_control.loop_var or 'item'
item = result_item.get(loop_var, None)
if result._task.action == 'include_vars':
for (key, value) in iteritems(result_item['ansible_facts']):

View file

@ -77,21 +77,25 @@ class WorkerProcess(multiprocessing.Process):
self._variable_manager = variable_manager
self._shared_loader_obj = shared_loader_obj
# dupe stdin, if we have one
self._new_stdin = sys.stdin
try:
fileno = sys.stdin.fileno()
if fileno is not None:
try:
self._new_stdin = os.fdopen(os.dup(fileno))
except OSError:
# couldn't dupe stdin, most likely because it's
# not a valid file descriptor, so we just rely on
# using the one that was passed in
pass
except ValueError:
# couldn't get stdin's fileno, so we just carry on
pass
if sys.stdin.isatty():
# dupe stdin, if we have one
self._new_stdin = sys.stdin
try:
fileno = sys.stdin.fileno()
if fileno is not None:
try:
self._new_stdin = os.fdopen(os.dup(fileno))
except OSError:
# couldn't dupe stdin, most likely because it's
# not a valid file descriptor, so we just rely on
# using the one that was passed in
pass
except (AttributeError, ValueError):
# couldn't get stdin's fileno, so we just carry on
pass
else:
# set to /dev/null
self._new_stdin = os.devnull
def run(self):
'''

View file

@ -71,6 +71,7 @@ class TaskExecutor:
self._shared_loader_obj = shared_loader_obj
self._connection = None
self._rslt_q = rslt_q
self._loop_eval_error = None
def run(self):
'''
@ -90,7 +91,13 @@ class TaskExecutor:
roledir = self._task._role._role_path
self._job_vars['roledir'] = roledir
items = self._get_loop_items()
try:
items = self._get_loop_items()
except AnsibleUndefinedVariable as e:
# save the error raised here for use later
items = None
self._loop_eval_error = e
if items is not None:
if len(items) > 0:
item_results = self._run_loop(items)
@ -232,7 +239,7 @@ class TaskExecutor:
loop_var = self._task.loop_control.loop_var or 'item'
if loop_var in task_vars:
raise AnsibleError("the loop variable '%s' is already in use. You should set the `loop_var` value in the `loop_control` option for the task to something else to avoid variable collisions" % loop_var)
display.warning("The loop variable '%s' is already in use. You should set the `loop_var` value in the `loop_control` option for the task to something else to avoid variable collisions and unexpected behavior." % loop_var)
items = self._squash_items(items, loop_var, task_vars)
for item in items:
@ -269,59 +276,68 @@ class TaskExecutor:
Squash items down to a comma-separated list for certain modules which support it
(typically package management modules).
'''
# _task.action could contain templatable strings (via action: and
# local_action:) Template it before comparing. If we don't end up
# optimizing it here, the templatable string might use template vars
# that aren't available until later (it could even use vars from the
# with_items loop) so don't make the templated string permanent yet.
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
task_action = self._task.action
if templar._contains_vars(task_action):
task_action = templar.template(task_action, fail_on_undefined=False)
name = None
try:
# _task.action could contain templatable strings (via action: and
# local_action:) Template it before comparing. If we don't end up
# optimizing it here, the templatable string might use template vars
# that aren't available until later (it could even use vars from the
# with_items loop) so don't make the templated string permanent yet.
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
task_action = self._task.action
if templar._contains_vars(task_action):
task_action = templar.template(task_action, fail_on_undefined=False)
if len(items) > 0 and task_action in self.SQUASH_ACTIONS:
if all(isinstance(o, string_types) for o in items):
final_items = []
if len(items) > 0 and task_action in self.SQUASH_ACTIONS:
if all(isinstance(o, string_types) for o in items):
final_items = []
name = None
for allowed in ['name', 'pkg', 'package']:
name = self._task.args.pop(allowed, None)
if name is not None:
break
for allowed in ['name', 'pkg', 'package']:
name = self._task.args.pop(allowed, None)
if name is not None:
break
# This gets the information to check whether the name field
# contains a template that we can squash for
template_no_item = template_with_item = None
if name:
if templar._contains_vars(name):
variables[loop_var] = '\0$'
template_no_item = templar.template(name, variables, cache=False)
variables[loop_var] = '\0@'
template_with_item = templar.template(name, variables, cache=False)
del variables[loop_var]
# This gets the information to check whether the name field
# contains a template that we can squash for
template_no_item = template_with_item = None
if name:
if templar._contains_vars(name):
variables[loop_var] = '\0$'
template_no_item = templar.template(name, variables, cache=False)
variables[loop_var] = '\0@'
template_with_item = templar.template(name, variables, cache=False)
del variables[loop_var]
# Check if the user is doing some operation that doesn't take
# name/pkg or the name/pkg field doesn't have any variables
# and thus the items can't be squashed
if template_no_item != template_with_item:
for item in items:
variables[loop_var] = item
if self._task.evaluate_conditional(templar, variables):
new_item = templar.template(name, cache=False)
final_items.append(new_item)
self._task.args['name'] = final_items
# Wrap this in a list so that the calling function loop
# executes exactly once
return [final_items]
else:
# Restore the name parameter
self._task.args['name'] = name
#elif:
# Right now we only optimize single entries. In the future we
# could optimize more types:
# * lists can be squashed together
# * dicts could squash entries that match in all cases except the
# name or pkg field.
# Check if the user is doing some operation that doesn't take
# name/pkg or the name/pkg field doesn't have any variables
# and thus the items can't be squashed
if template_no_item != template_with_item:
for item in items:
variables[loop_var] = item
if self._task.evaluate_conditional(templar, variables):
new_item = templar.template(name, cache=False)
final_items.append(new_item)
self._task.args['name'] = final_items
# Wrap this in a list so that the calling function loop
# executes exactly once
return [final_items]
else:
# Restore the name parameter
self._task.args['name'] = name
#elif:
# Right now we only optimize single entries. In the future we
# could optimize more types:
# * lists can be squashed together
# * dicts could squash entries that match in all cases except the
# name or pkg field.
except:
# Squashing is an optimization. If it fails for any reason,
# simply use the unoptimized list of items.
# Restore the name parameter
if name is not None:
self._task.args['name'] = name
pass
return items
def _execute(self, variables=None):
@ -368,6 +384,11 @@ class TaskExecutor:
if not self._task.evaluate_conditional(templar, variables):
display.debug("when evaluation failed, skipping this task")
return dict(changed=False, skipped=True, skip_reason='Conditional check failed', _ansible_no_log=self._play_context.no_log)
# since we're not skipping, if there was a loop evaluation error
# raised earlier we need to raise it now to halt the execution of
# this task
if self._loop_eval_error is not None:
raise self._loop_eval_error
except AnsibleError:
# skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags
if self._task.action != 'include':
@ -400,7 +421,17 @@ class TaskExecutor:
# get the connection and the handler for this execution
if not self._connection or not getattr(self._connection, 'connected', False) or self._play_context.remote_addr != self._connection._play_context.remote_addr:
self._connection = self._get_connection(variables=variables, templar=templar)
self._connection.set_host_overrides(host=self._host)
hostvars = variables.get('hostvars', None)
if hostvars:
try:
target_hostvars = hostvars.raw_get(self._host.name)
except:
# FIXME: this should catch the j2undefined error here
# specifically instead of all exceptions
target_hostvars = dict()
else:
target_hostvars = dict()
self._connection.set_host_overrides(host=self._host, hostvars=target_hostvars)
else:
# if connection is reused, its _play_context is no longer valid and needs
# to be replaced with the one templated above, in case other data changed
@ -414,10 +445,14 @@ class TaskExecutor:
self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token)
# Read some values from the task, so that we can modify them if need be
if self._task.until is not None:
if self._task.until:
retries = self._task.retries
if retries <= 0:
if retries is None:
retries = 3
elif retries <= 0:
retries = 1
else:
retries += 1
else:
retries = 1
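# net effect of the branch above (sketch):
#   until set, retries unset -> retries = 3 (three attempts total)
#   until set, retries = 5   -> retries = 6 (the first try plus five retries)
#   until set, retries <= 0  -> retries = 1 (a single attempt)
#   no until                 -> retries = 1 (a single attempt)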
@ -431,7 +466,7 @@ class TaskExecutor:
display.debug("starting attempt loop")
result = None
for attempt in range(retries):
for attempt in range(1, retries + 1):
display.debug("running the handler")
try:
result = self._handler.run(task_vars=variables)
@ -448,17 +483,8 @@ class TaskExecutor:
vars_copy[self._task.register] = wrap_var(result.copy())
if self._task.async > 0:
# the async_wrapper module returns dumped JSON via its stdout
# response, so we parse it here and replace the result
try:
if 'skipped' in result and result['skipped'] or 'failed' in result and result['failed']:
return result
result = json.loads(result.get('stdout'))
except (TypeError, ValueError) as e:
return dict(failed=True, msg=u"The async task did not return valid JSON: %s" % to_unicode(e))
if self._task.poll > 0:
result = self._poll_async_result(result=result, templar=templar)
result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
# ensure no log is preserved
result["_ansible_no_log"] = self._play_context.no_log
@ -494,23 +520,23 @@ class TaskExecutor:
_evaluate_changed_when_result(result)
_evaluate_failed_when_result(result)
if attempt < retries - 1:
if retries > 1:
cond = Conditional(loader=self._loader)
cond.when = self._task.until
if cond.evaluate_conditional(templar, vars_copy):
break
else:
# no conditional check, or it failed, so sleep for the specified time
result['attempts'] = attempt + 1
result['retries'] = retries
result['_ansible_retry'] = True
display.debug('Retrying task, attempt %d of %d' % (attempt + 1, retries))
self._rslt_q.put(TaskResult(self._host, self._task, result), block=False)
time.sleep(delay)
if attempt < retries:
result['attempts'] = attempt
result['_ansible_retry'] = True
result['retries'] = retries
display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
self._rslt_q.put(TaskResult(self._host, self._task, result), block=False)
time.sleep(delay)
else:
if retries > 1:
# we ran out of attempts, so mark the result as failed
result['attempts'] = retries
result['failed'] = True
# do the final update of the local variables here, for both registered
@ -542,11 +568,14 @@ class TaskExecutor:
display.debug("attempt loop complete, returning result")
return result
def _poll_async_result(self, result, templar):
def _poll_async_result(self, result, templar, task_vars=None):
'''
Polls for the specified JID to be complete
'''
if task_vars is None:
task_vars = self._job_vars
async_jid = result.get('ansible_job_id')
if async_jid is None:
return dict(failed=True, msg="No job id was returned by the async task")
@ -574,14 +603,22 @@ class TaskExecutor:
while time_left > 0:
time.sleep(self._task.poll)
async_result = normal_handler.run()
if int(async_result.get('finished', 0)) == 1 or 'failed' in async_result or 'skipped' in async_result:
async_result = normal_handler.run(task_vars=task_vars)
# We do not bail out of the loop in cases where the failure
# is associated with a parsing error. The async_runner can
# have issues which result in a half-written/unparseable result
# file on disk, which manifests to the user as a timeout happening
# before it's time to timeout.
if int(async_result.get('finished', 0)) == 1 or ('failed' in async_result and async_result.get('_ansible_parsed', False)) or 'skipped' in async_result:
break
time_left -= self._task.poll
if int(async_result.get('finished', 0)) != 1:
return dict(failed=True, msg="async task did not complete within the requested time")
if async_result.get('_ansible_parsed'):
return dict(failed=True, msg="async task did not complete within the requested time")
else:
return dict(failed=True, msg="async task produced unparseable results", async_result=async_result)
else:
return async_result
@ -595,14 +632,14 @@ class TaskExecutor:
# since we're delegating, we don't want to use interpreter values
# which would have been set for the original target host
for i in variables.keys():
if i.startswith('ansible_') and i.endswith('_interpreter'):
if isinstance(i, string_types) and i.startswith('ansible_') and i.endswith('_interpreter'):
del variables[i]
# now replace the interpreter values with those that may have come
# from the delegated-to host
delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict())
if isinstance(delegated_vars, dict):
for i in delegated_vars:
if i.startswith("ansible_") and i.endswith("_interpreter"):
if isinstance(i, string_types) and i.startswith("ansible_") and i.endswith("_interpreter"):
variables[i] = delegated_vars[i]
conn_type = self._play_context.connection
@ -629,6 +666,8 @@ class TaskExecutor:
raise AnsibleError("the connection plugin '%s' was not found" % conn_type)
if self._play_context.accelerate:
# accelerate is deprecated as of 2.1...
display.deprecated('Accelerated mode is deprecated. Consider using SSH with ControlPersist and pipelining enabled instead')
# launch the accelerated daemon here
ssh_connection = connection
handler = self._shared_loader_obj.action_loader.get(

View file

@ -58,6 +58,13 @@ class TaskQueueManager:
which dispatches the Play's tasks to hosts.
'''
RUN_OK = 0
RUN_ERROR = 1
RUN_FAILED_HOSTS = 2
RUN_UNREACHABLE_HOSTS = 3
RUN_FAILED_BREAK_PLAY = 4
RUN_UNKNOWN_ERROR = 255
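A sketch of how callers branch on these codes, mirroring the PlaybookExecutor change earlier in this diff (the surrounding names are hypothetical caller code):

    result = tqm.run(play=play)
    if result == TaskQueueManager.RUN_FAILED_BREAK_PLAY:  # 4: fatal, stop remaining batches
        result = TaskQueueManager.RUN_FAILED_HOSTS        # 2: what gets reported upward
        break_play = True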
def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False):
self._inventory = inventory
@ -107,7 +114,7 @@ class TaskQueueManager:
self._result_prc = ResultProcess(self._final_q, self._workers)
self._result_prc.start()
def _initialize_notified_handlers(self, handlers):
def _initialize_notified_handlers(self, play):
'''
Clears and initializes the shared notified handlers dict with entries
for each handler in the play, which is an empty array that will contain
@ -116,8 +123,7 @@ class TaskQueueManager:
# Zero the dictionary first by removing any entries there.
# Proxied dicts don't support iteritems, so we have to use keys()
for key in self._notified_handlers.keys():
del self._notified_handlers[key]
self._notified_handlers.clear()
def _process_block(b):
temp_list = []
@ -129,12 +135,13 @@ class TaskQueueManager:
return temp_list
handler_list = []
for handler_block in handlers:
for handler_block in play.handlers:
handler_list.extend(_process_block(handler_block))
# then initialize it with the handler names from the handler list
# then initialize it with the given handler list
for handler in handler_list:
self._notified_handlers[handler.get_name()] = []
if handler not in self._notified_handlers:
self._notified_handlers[handler] = []
def load_callbacks(self):
'''
@ -199,6 +206,7 @@ class TaskQueueManager:
new_play = play.copy()
new_play.post_validate(templar)
new_play.handlers = new_play.compile_roles_handlers() + new_play.handlers
self.hostvars = HostVars(
inventory=self._inventory,
@ -219,7 +227,7 @@ class TaskQueueManager:
self.send_callback('v2_playbook_on_play_start', new_play)
# initialize the shared dictionary containing the notified handlers
self._initialize_notified_handlers(new_play.handlers)
self._initialize_notified_handlers(new_play)
# load the specified strategy (or the default linear one)
strategy = strategy_loader.get(new_play.strategy, self)
@ -236,6 +244,16 @@ class TaskQueueManager:
start_at_done = self._start_at_done,
)
# Because the TQM may survive multiple play runs, we start by marking
# any hosts as failed in the iterator here which may have been marked
# as failed in previous runs. Then we clear the internal list of failed
# hosts so we know what failed this round.
for host_name in self._failed_hosts.keys():
host = self._inventory.get_host(host_name)
iterator.mark_host_failed(host)
self.clear_failed_hosts()
# during initialization, the PlayContext will clear the start_at_task
# field to signal that a matching task was found, so check that here
# and remember it so we don't try to skip tasks on future plays
@ -244,6 +262,11 @@ class TaskQueueManager:
# and run the play using the strategy and cleanup on way out
play_return = strategy.run(iterator, play_context)
# now re-save the hosts that failed from the iterator to our internal list
for host_name in iterator.get_failed_hosts():
self._failed_hosts[host_name] = True
self._cleanup_processes()
return play_return
@ -286,6 +309,18 @@ class TaskQueueManager:
def terminate(self):
self._terminated = True
def has_dead_workers(self):
# [<WorkerProcess(WorkerProcess-2, stopped[SIGKILL])>,
# <WorkerProcess(WorkerProcess-2, stopped[SIGTERM])>
defunct = False
for idx,x in enumerate(self._workers):
if hasattr(x[0], 'exitcode'):
if x[0].exitcode in [-9, -15]:
defunct = True
return defunct
def send_callback(self, method_name, *args, **kwargs):
for callback_plugin in [self._stdout_callback] + self._callback_plugins:
# a plugin that set self.disabled to True will not be called

View file

@ -40,14 +40,16 @@ class TaskResult:
return self._check_key('changed')
def is_skipped(self):
# loop results
if 'results' in self._result and self._task.loop:
flag = True
for res in self._result.get('results', []):
if isinstance(res, dict):
flag &= res.get('skipped', False)
return flag
else:
return self._result.get('skipped', False)
results = self._result['results']
# Loop tasks are only considered skipped if all items were skipped.
# some squashed results (eg, yum) are not dicts and can't be skipped individually
if results and all(isinstance(res, dict) and res.get('skipped', False) for res in results):
return True
# regular tasks and squashed non-dict results
return self._result.get('skipped', False)
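# illustrative outcomes of is_skipped() for loop results, given the logic above:
#   [{'skipped': True}, {'skipped': True}]  -> True  (every item skipped)
#   [{'skipped': True}, {'changed': True}]  -> False (one item actually ran)
#   ['pkg1,pkg2']                           -> False (squashed non-dict result; falls through)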
def is_failed(self):
if 'failed_when_result' in self._result or \
@ -60,7 +62,7 @@ class TaskResult:
return self._check_key('unreachable')
def _check_key(self, key):
if 'results' in self._result and self._task.loop:
if self._result.get('results', []) and self._task.loop:
flag = False
for res in self._result.get('results', []):
if isinstance(res, dict):

View file

@ -28,7 +28,6 @@ import json
import urllib
from urllib2 import quote as urlquote, HTTPError
from urlparse import urlparse
import ansible.constants as C
from ansible.errors import AnsibleError
@ -41,6 +40,21 @@ except ImportError:
from ansible.utils.display import Display
display = Display()
def g_connect(method):
''' wrapper to lazily initialize connection info to galaxy '''
def wrapped(self, *args, **kwargs):
if not self.initialized:
display.vvvv("Initial connection to galaxy_server: %s" % self._api_server)
server_version = self._get_server_api_version()
if not server_version in self.SUPPORTED_VERSIONS:
raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)
self.baseurl = '%s/api/%s' % (self._api_server, server_version)
self.version = server_version # for future use
display.vvvv("Base API: %s" % self.baseurl)
self.initialized = True
return method(self, *args, **kwargs)
return wrapped
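The same lazy-initialization pattern, reduced to a standalone sketch (the names are illustrative, not the Galaxy API):

    def lazy_connect(method):
        def wrapped(self, *args, **kwargs):
            if not self.initialized:
                self.handshake()         # one-time setup, e.g. probing the API version
                self.initialized = True
            return method(self, *args, **kwargs)
        return wrapped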
class GalaxyAPI(object):
''' This class is meant to be used as an API client for an Ansible Galaxy server '''
@ -51,25 +65,17 @@ class GalaxyAPI(object):
self.galaxy = galaxy
self.token = GalaxyToken()
self._api_server = C.GALAXY_SERVER
self._validate_certs = not C.GALAXY_IGNORE_CERTS
self._validate_certs = not galaxy.options.ignore_certs
self.baseurl = None
self.version = None
self.initialized = False
# set validate_certs
if galaxy.options.ignore_certs:
self._validate_certs = False
display.vvv('Validate TLS certificates: %s' % self._validate_certs)
display.debug('Validate TLS certificates: %s' % self._validate_certs)
# set the API server
if galaxy.options.api_server != C.GALAXY_SERVER:
self._api_server = galaxy.options.api_server
display.vvv("Connecting to galaxy_server: %s" % self._api_server)
server_version = self.get_server_api_version()
if not server_version in self.SUPPORTED_VERSIONS:
raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)
self.baseurl = '%s/api/%s' % (self._api_server, server_version)
self.version = server_version # for future use
display.vvv("Base API: %s" % self.baseurl)
def __auth_header(self):
token = self.token.get()
@ -77,6 +83,7 @@ class GalaxyAPI(object):
raise AnsibleError("No access token. You must first use login to authenticate and obtain an access token.")
return {'Authorization': 'Token ' + token}
@g_connect
def __call_galaxy(self, url, args=None, headers=None, method=None):
if args and not headers:
headers = self.__auth_header()
@ -91,13 +98,13 @@ class GalaxyAPI(object):
@property
def api_server(self):
return self._api_server
return self._api_server
@property
def validate_certs(self):
return self._validate_certs
def get_server_api_version(self):
def _get_server_api_version(self):
"""
Fetches the Galaxy API current version to ensure
the API server is up and reachable.
@ -107,8 +114,9 @@ class GalaxyAPI(object):
data = json.load(open_url(url, validate_certs=self._validate_certs))
return data['current_version']
except Exception as e:
raise AnsibleError("The API server (%s) is not responding, please try again later." % url)
raise AnsibleError("The API server (%s) is not responding, please try again later" % url)
@g_connect
def authenticate(self, github_token):
"""
Retrieve an authentication token
@ -119,6 +127,7 @@ class GalaxyAPI(object):
data = json.load(resp)
return data
@g_connect
def create_import_task(self, github_user, github_repo, reference=None):
"""
Post an import request
@ -134,6 +143,7 @@ class GalaxyAPI(object):
return data['results']
return data
@g_connect
def get_import_task(self, task_id=None, github_user=None, github_repo=None):
"""
Check the status of an import task.
@ -145,10 +155,11 @@ class GalaxyAPI(object):
url = "%s?github_user=%s&github_repo=%s" % (url,github_user,github_repo)
else:
raise AnsibleError("Expected task_id or github_user and github_repo")
data = self.__call_galaxy(url)
return data['results']
@g_connect
def lookup_role_by_name(self, role_name, notify=True):
"""
Find a role by name.
@ -170,6 +181,7 @@ class GalaxyAPI(object):
return data["results"][0]
return None
@g_connect
def fetch_role_related(self, related, role_id):
"""
Fetch the list of related items for the given role.
@ -190,6 +202,7 @@ class GalaxyAPI(object):
except:
return None
@g_connect
def get_list(self, what):
"""
Fetch the list of items specified.
@ -213,6 +226,7 @@ class GalaxyAPI(object):
except Exception as error:
raise AnsibleError("Failed to download the %s list: %s" % (what, str(error)))
@g_connect
def search_roles(self, search, **kwargs):
search_url = self.baseurl + '/search/roles/?'
@ -228,7 +242,7 @@ class GalaxyAPI(object):
if tags and isinstance(tags, basestring):
tags = tags.split(',')
search_url += '&tags_autocomplete=' + '+'.join(tags)
if platforms and isinstance(platforms, basestring):
platforms = platforms.split(',')
search_url += '&platforms_autocomplete=' + '+'.join(platforms)
@ -238,10 +252,11 @@ class GalaxyAPI(object):
if author:
search_url += '&username_autocomplete=%s' % author
data = self.__call_galaxy(search_url)
return data
@g_connect
def add_secret(self, source, github_user, github_repo, secret):
url = "%s/notification_secrets/" % self.baseurl
args = urllib.urlencode({
@ -253,16 +268,19 @@ class GalaxyAPI(object):
data = self.__call_galaxy(url, args=args)
return data
@g_connect
def list_secrets(self):
url = "%s/notification_secrets" % self.baseurl
data = self.__call_galaxy(url, headers=self.__auth_header())
return data
@g_connect
def remove_secret(self, secret_id):
url = "%s/notification_secrets/%s/" % (self.baseurl, secret_id)
data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
return data
@g_connect
def delete_role(self, github_user, github_repo):
url = "%s/removerole/?github_user=%s&github_repo=%s" % (self.baseurl,github_user,github_repo)
data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')

View file

@ -54,13 +54,9 @@ class GalaxyRole(object):
self._metadata = None
self._install_info = None
self._validate_certs = not galaxy.options.ignore_certs
self._validate_certs = not C.GALAXY_IGNORE_CERTS
# set validate_certs
if galaxy.options.ignore_certs:
self._validate_certs = False
display.vvv('Validate TLS certificates: %s' % self._validate_certs)
display.debug('Validate TLS certificates: %s' % self._validate_certs)
self.options = galaxy.options
self.galaxy = galaxy

View file

@ -34,11 +34,10 @@ from ansible.inventory.dir import InventoryDirectory, get_file_parser
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.plugins import vars_loader
from ansible.utils.unicode import to_unicode
from ansible.utils.unicode import to_unicode, to_bytes
from ansible.utils.vars import combine_vars
from ansible.parsing.utils.addresses import parse_address
HOSTS_PATTERNS_CACHE = {}
from ansible.utils.path import unfrackpath
try:
from __main__ import display
@ -46,6 +45,8 @@ except ImportError:
from ansible.utils.display import Display
display = Display()
HOSTS_PATTERNS_CACHE = {}
class Inventory(object):
"""
Host inventory for ansible.
@ -55,9 +56,10 @@ class Inventory(object):
# the host file, or script path, or list of hosts
# if a list, inventory data will NOT be loaded
self.host_list = host_list
self.host_list = unfrackpath(host_list, follow=False)
self._loader = loader
self._variable_manager = variable_manager
self.localhost = None
# caching to avoid repeated calculations, particularly with
# external inventory scripts.
@ -68,6 +70,12 @@ class Inventory(object):
self._pattern_cache = {}
self._vars_plugins = []
self._basedir = self.basedir()
# Contains set of filenames under group_vars directories
self._group_vars_files = self._find_group_vars_files(self._basedir)
self._host_vars_files = self._find_host_vars_files(self._basedir)
# to be set by playbook code calling set_playbook_basedir
self._playbook_basedir = None
@ -119,7 +127,15 @@ class Inventory(object):
display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_unicode(e))
host = h
port = None
all.add_host(Host(host, port))
new_host = Host(host, port)
if h in C.LOCALHOST:
# set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
if self.localhost is not None:
display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name))
display.vvvv("Set default localhost to %s" % h)
self.localhost = new_host
all.add_host(new_host)
elif self._loader.path_exists(host_list):
#TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
if self.is_directory(host_list):
@ -128,7 +144,7 @@ class Inventory(object):
self.parser = InventoryDirectory(loader=self._loader, groups=self.groups, filename=host_list)
else:
self.parser = get_file_parser(host_list, self.groups, self._loader)
vars_loader.add_directory(self.basedir(), with_subdir=True)
vars_loader.add_directory(self._basedir, with_subdir=True)
if not self.parser:
# should never happen, but JIC
@ -142,10 +158,12 @@ class Inventory(object):
for g in self.groups:
group = self.groups[g]
group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
self.get_group_vars(group)
# set host vars from host_vars/ files and vars plugins
for host in self.get_hosts():
# get host vars from host_vars/ files and vars plugins
for host in self.get_hosts(ignore_limits_and_restrictions=True):
host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
self.get_host_vars(host)
def _match(self, str, pattern_str):
try:
@ -204,7 +222,7 @@ class Inventory(object):
# exclude hosts mentioned in any restriction (ex: failed hosts)
if self._restriction is not None:
hosts = [ h for h in hosts if h in self._restriction ]
hosts = [ h for h in hosts if h.name in self._restriction ]
seen = set()
HOSTS_PATTERNS_CACHE[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)]
@ -439,10 +457,14 @@ class Inventory(object):
for group in groups.values():
if pattern == 'all':
for host in group.get_hosts():
if host.implicit:
continue
__append_host_to_results(host)
else:
if self._match(group.name, pattern) and group.name not in ('all', 'ungrouped'):
for host in group.get_hosts():
if host.implicit:
continue
__append_host_to_results(host)
else:
matching_hosts = self._match_list(group.get_hosts(), 'name', pattern)
@ -455,13 +477,24 @@ class Inventory(object):
return results
def _create_implicit_localhost(self, pattern):
new_host = Host(pattern)
new_host.address = "127.0.0.1"
new_host.vars = self.get_host_vars(new_host)
new_host.set_variable("ansible_connection", "local")
if "ansible_python_interpreter" not in new_host.vars:
new_host.set_variable("ansible_python_interpreter", sys.executable)
self.get_group("ungrouped").add_host(new_host)
if self.localhost:
new_host = self.localhost
else:
new_host = Host(pattern)
new_host.address = "127.0.0.1"
new_host.implicit = True
new_host.vars = self.get_host_vars(new_host)
new_host.set_variable("ansible_connection", "local")
if "ansible_python_interpreter" not in new_host.vars:
py_interp = sys.executable
if not py_interp:
# sys.executable is not set in some corner cases. #13585
display.warning('Unable to determine python interpreter from sys.executable. Using /usr/bin/python as the default. You can correct this by setting ansible_python_interpreter for localhost')
py_interp = '/usr/bin/python'
new_host.set_variable("ansible_python_interpreter", py_interp)
self.get_group("ungrouped").add_host(new_host)
self.localhost = new_host
return new_host
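The interpreter fallback above reduces to a one-liner; a minimal sketch:

import sys

# sys.executable can be empty when the interpreter is embedded, so fall
# back to a fixed path, mirroring the warning above.
py_interp = sys.executable or '/usr/bin/python'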
def clear_pattern_cache(self):
@ -482,23 +515,31 @@ class Inventory(object):
def get_host(self, hostname):
if hostname not in self._hosts_cache:
self._hosts_cache[hostname] = self._get_host(hostname)
if hostname in C.LOCALHOST:
for host in C.LOCALHOST.difference((hostname,)):
self._hosts_cache[host] = self._hosts_cache[hostname]
return self._hosts_cache[hostname]
def _get_host(self, hostname):
if hostname in C.LOCALHOST:
for host in self.get_group('all').get_hosts():
if host.name in C.LOCALHOST:
return host
return self._create_implicit_localhost(hostname)
matching_host = None
for group in self.groups.values():
for host in group.get_hosts():
if hostname == host.name:
matching_host = host
self._hosts_cache[host.name] = host
if hostname in C.LOCALHOST:
if self.localhost:
matching_host = self.localhost
else:
for host in self.get_group('all').get_hosts():
if host.name in C.LOCALHOST:
matching_host = host
break
if not matching_host:
matching_host = self._create_implicit_localhost(hostname)
# update caches
self._hosts_cache[hostname] = matching_host
for host in C.LOCALHOST.difference((hostname,)):
self._hosts_cache[host] = self._hosts_cache[hostname]
else:
for group in self.groups.values():
for host in group.get_hosts():
if host not in self._hosts_cache:
self._hosts_cache[host.name] = host
if hostname == host.name:
matching_host = host
return matching_host
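A sketch of the alias-aware cache fill this implements, assuming LOCALHOST is a frozenset of localhost spellings as in ansible.constants: resolving any one spelling populates the cache for all of them, so every alias maps to the same Host object.

LOCALHOST = frozenset(('localhost', '127.0.0.1', '::1'))  # mirrors C.LOCALHOST
cache = {}

def remember(hostname, host):
    cache[hostname] = host
    if hostname in LOCALHOST:
        for alias in LOCALHOST.difference((hostname,)):
            cache[alias] = host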
def get_group(self, groupname):
@ -567,9 +608,6 @@ class Inventory(object):
if self.parser is not None:
vars = combine_vars(vars, self.parser.get_host_variables(host))
# Read host_vars/ files
vars = combine_vars(vars, self.get_host_vars(host))
return vars
def add_group(self, group):
@ -600,7 +638,7 @@ class Inventory(object):
return
elif not isinstance(restriction, list):
restriction = [ restriction ]
self._restriction = restriction
self._restriction = [ h.name for h in restriction ]
def subset(self, subset_pattern):
"""
@ -680,6 +718,12 @@ class Inventory(object):
"""
# Only update things if dir is a different playbook basedir
if dir_name != self._playbook_basedir:
# we're changing the playbook basedir, so if we had set one previously
# clear the host/group vars entries from the VariableManager so they're
# not incorrectly used by playbooks from different directories
if self._playbook_basedir:
self._variable_manager.clear_playbook_hostgroup_vars_files(self._playbook_basedir)
self._playbook_basedir = dir_name
# get group vars from group_vars/ files
# TODO: excluding the new_pb_basedir directory may result in group_vars
@ -687,26 +731,51 @@ class Inventory(object):
# we do this shouldn't be too much of an issue. Still, this should
# be fixed at some point to allow a "first load" to touch all of the
# directories, then later runs only touch the new basedir specified
for group in self.groups.values():
#group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
group.vars = combine_vars(group.vars, self.get_group_vars(group))
# get host vars from host_vars/ files
for host in self.get_hosts():
#host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
host.vars = combine_vars(host.vars, self.get_host_vars(host))
found_group_vars = self._find_group_vars_files(self._playbook_basedir)
if found_group_vars:
self._group_vars_files = self._group_vars_files.union(found_group_vars)
for group in self.groups.values():
self.get_group_vars(group)
found_host_vars = self._find_host_vars_files(self._playbook_basedir)
if found_host_vars:
self._host_vars_files = self._host_vars_files.union(found_host_vars)
# get host vars from host_vars/ files
for host in self.get_hosts():
self.get_host_vars(host)
# invalidate cache
self._vars_per_host = {}
self._vars_per_group = {}
def get_host_vars(self, host, new_pb_basedir=False):
def get_host_vars(self, host, new_pb_basedir=False, return_results=False):
""" Read host_vars/ files """
return self._get_hostgroup_vars(host=host, group=None, new_pb_basedir=new_pb_basedir)
return self._get_hostgroup_vars(host=host, group=None, new_pb_basedir=new_pb_basedir, return_results=return_results)
def get_group_vars(self, group, new_pb_basedir=False):
def get_group_vars(self, group, new_pb_basedir=False, return_results=False):
""" Read group_vars/ files """
return self._get_hostgroup_vars(host=None, group=group, new_pb_basedir=new_pb_basedir)
return self._get_hostgroup_vars(host=None, group=group, new_pb_basedir=new_pb_basedir, return_results=return_results)
def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False):
def _find_group_vars_files(self, basedir):
""" Find group_vars/ files """
if basedir in ('', None):
basedir = './'
path = os.path.realpath(os.path.join(basedir, 'group_vars'))
found_vars = set()
if os.path.exists(path):
found_vars = set(os.listdir(unicode(path)))
return found_vars
def _find_host_vars_files(self, basedir):
""" Find host_vars/ files """
if basedir in ('', None):
basedir = './'
path = os.path.realpath(os.path.join(basedir, 'host_vars'))
found_vars = set()
if os.path.exists(path):
found_vars = set(os.listdir(unicode(path)))
return found_vars
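These two helpers do one directory listing up front so later lookups become cheap set-membership tests instead of per-host/per-group filesystem probes; a hedged standalone sketch (find_vars_files is an illustrative name):

import os

def find_vars_files(basedir, subdir):
    path = os.path.realpath(os.path.join(basedir or './', subdir))
    return set(os.listdir(path)) if os.path.exists(path) else set()

group_vars_files = find_vars_files('.', 'group_vars')
print('webservers.yml' in group_vars_files)  # cheap membership test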
def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False, return_results=False):
"""
Loads variables from group_vars/<groupname> and host_vars/<hostname> in directories parallel
to the inventory base directory or in the same directory as the playbook. Variables in the playbook
@ -715,14 +784,15 @@ class Inventory(object):
results = {}
scan_pass = 0
_basedir = self.basedir()
_basedir = self._basedir
_playbook_basedir = self._playbook_basedir
# look in both the inventory base directory and the playbook base directory
# unless we do an update for a new playbook base dir
if not new_pb_basedir:
basedirs = [_basedir, self._playbook_basedir]
if not new_pb_basedir and _playbook_basedir:
basedirs = [_basedir, _playbook_basedir]
else:
basedirs = [self._playbook_basedir]
basedirs = [_basedir]
for basedir in basedirs:
# this can happen from particular API usages, particularly if not run
@ -737,17 +807,22 @@ class Inventory(object):
continue
# save work of second scan if the directories are the same
if _basedir == self._playbook_basedir and scan_pass != 1:
if _basedir == _playbook_basedir and scan_pass != 1:
continue
if group and host is None:
# Before trying to load vars from a file, check that the directory contains relevant file names
if host is None and any(map(lambda ext: group.name + ext in self._group_vars_files, C.YAML_FILENAME_EXTENSIONS)):
# load vars in dir/group_vars/name_of_group
base_path = os.path.abspath(os.path.join(to_unicode(basedir, errors='strict'), "group_vars/%s" % group.name))
results = combine_vars(results, self._variable_manager.add_group_vars_file(base_path, self._loader))
elif host and group is None:
base_path = to_unicode(os.path.abspath(os.path.join(to_bytes(basedir), b"group_vars/" + to_bytes(group.name))), errors='strict')
group_results = self._variable_manager.add_group_vars_file(base_path, self._loader)
if return_results:
results = combine_vars(results, group_results)
elif group is None and any(map(lambda ext: host.name + ext in self._host_vars_files, C.YAML_FILENAME_EXTENSIONS)):
# same for hostvars in dir/host_vars/name_of_host
base_path = os.path.abspath(os.path.join(to_unicode(basedir, errors='strict'), "host_vars/%s" % host.name))
results = combine_vars(results, self._variable_manager.add_host_vars_file(base_path, self._loader))
base_path = to_unicode(os.path.abspath(os.path.join(to_bytes(basedir), b"host_vars/" + to_bytes(host.name))), errors='strict')
host_results = self._variable_manager.add_host_vars_file(base_path, self._loader)
if return_results:
results = combine_vars(results, host_results)
# all done, results is a dictionary of variables for this particular host.
return results

View file

@ -140,10 +140,14 @@ class Group:
for kk in kid_hosts:
if kk not in seen:
seen[kk] = 1
if self.name == 'all' and kk.implicit:
continue
hosts.append(kk)
for mine in self.hosts:
if mine not in seen:
seen[mine] = 1
if self.name == 'all' and mine.implicit:
continue
hosts.append(mine)
return hosts

View file

@ -60,6 +60,7 @@ class Host:
uuid=self._uuid,
gathered_facts=self._gathered_facts,
groups=groups,
implicit=self.implicit,
)
def deserialize(self, data):
@ -69,6 +70,7 @@ class Host:
self.vars = data.get('vars', dict())
self.address = data.get('address', '')
self._uuid = data.get('uuid', uuid.uuid4())
self.implicit = data.get('implicit', False)
groups = data.get('groups', [])
for group_data in groups:
@ -89,6 +91,7 @@ class Host:
self._gathered_facts = False
self._uuid = uuid.uuid4()
self.implicit = False
def __repr__(self):
return self.get_name()

View file

@ -124,7 +124,7 @@ class InventoryParser(object):
del pending_declarations[groupname]
continue
elif line.startswith('['):
elif line.startswith('[') and line.endswith(']'):
self._raise_error("Invalid section entry: '%s'. Please make sure that there are no spaces" % line + \
"in the section entry, and that there are no other invalid characters")

View file

@ -27,6 +27,7 @@ from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
from ansible.parsing.utils.addresses import parse_address
from ansible.compat.six import string_types
class InventoryParser(object):
"""
@ -77,6 +78,11 @@ class InventoryParser(object):
self.groups[group] = Group(name=group)
if isinstance(group_data, dict):
#make sure they are dicts
for section in ['vars', 'children', 'hosts']:
if section in group_data and isinstance(group_data[section], string_types):
group_data[section] = { group_data[section]: None}
if 'vars' in group_data:
for var in group_data['vars']:
if var != 'ansible_group_priority':
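The string-to-dict coercion above handles YAML shorthand like `children: webservers`; a minimal sketch (the real code uses six's string_types for py2/py3, this sketch uses py3 str):

group_data = {'children': 'webservers'}  # YAML shorthand yields a string
for section in ('vars', 'children', 'hosts'):
    if isinstance(group_data.get(section), str):
        group_data[section] = {group_data[section]: None}
print(group_data)  # {'children': {'webservers': None}}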

View file

@ -15,3 +15,6 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Note: Do not add any code to this file. module_utils may be a namespace
# package when using Ansible-2.1+ Anything in this file may not be available
# if one of the other packages in the namespace is loaded first.

View file

@ -24,7 +24,10 @@ import os
import re
import sys
import copy
import importlib
import inspect
from packaging.version import Version
from os.path import expanduser
from ansible.module_utils.basic import *
@ -66,33 +69,38 @@ CIDR_PATTERN = re.compile("(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){
AZURE_SUCCESS_STATE = "Succeeded"
AZURE_FAILED_STATE = "Failed"
AZURE_MIN_VERSION = "2016-03-30"
HAS_AZURE = True
HAS_AZURE_EXC = None
HAS_MSRESTAZURE = True
HAS_MSRESTAZURE_EXC = None
# NB: packaging issues sometimes cause msrestazure not to be installed; check it separately
try:
from msrest.serialization import Serializer
except ImportError as exc:
HAS_MSRESTAZURE_EXC = exc
HAS_MSRESTAZURE = False
try:
from enum import Enum
from msrest.serialization import Serializer
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.compute import __version__ as azure_compute_version
from azure.mgmt.network.models import PublicIPAddress, NetworkSecurityGroup, SecurityRule, NetworkInterface, \
NetworkInterfaceIPConfiguration, Subnet
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network.network_management_client import NetworkManagementClient,\
NetworkManagementClientConfiguration
from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient,\
ResourceManagementClientConfiguration
from azure.mgmt.storage.storage_management_client import StorageManagementClient,\
StorageManagementClientConfiguration
from azure.mgmt.compute.compute_management_client import ComputeManagementClient,\
ComputeManagementClientConfiguration
from azure.mgmt.network.version import VERSION as network_client_version
from azure.mgmt.storage.version import VERSION as storage_client_version
from azure.mgmt.compute.version import VERSION as compute_client_version
from azure.mgmt.resource.version import VERSION as resource_client_version
from azure.mgmt.network.network_management_client import NetworkManagementClient
from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient
from azure.mgmt.storage.storage_management_client import StorageManagementClient
from azure.mgmt.compute.compute_management_client import ComputeManagementClient
from azure.storage.cloudstorageaccount import CloudStorageAccount
except ImportError, exc:
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
def azure_id_to_dict(id):
pieces = re.sub(r'^\/', '', id).split('/')
result = {}
@ -103,6 +111,15 @@ def azure_id_to_dict(id):
return result
AZURE_EXPECTED_VERSIONS = dict(
storage_client_version="0.30.0rc5",
compute_client_version="0.30.0rc5",
network_client_version="0.30.0rc5",
resource_client_version="0.30.0rc5"
)
AZURE_MIN_RELEASE = '2.0.0rc5'
class AzureRMModuleBase(object):
def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,
@ -133,12 +150,13 @@ class AzureRMModuleBase(object):
supports_check_mode=supports_check_mode,
required_if=merged_required_if)
if not HAS_AZURE:
self.fail("The Azure Python SDK is not installed (try 'pip install azure') - {0}".format(HAS_AZURE_EXC))
if not HAS_MSRESTAZURE:
self.fail("Do you have msrestazure installed? Try `pip install msrestazure`"
"- {0}".format(HAS_MSRESTAZURE_EXC))
if azure_compute_version < AZURE_MIN_VERSION:
self.fail("Expecting azure.mgmt.compute.__version__ to be >= {0}. Found version {1} "
"Do you have Azure >= 2.0.0rc2 installed?".format(AZURE_MIN_VERSION, azure_compute_version))
if not HAS_AZURE:
self.fail("Do you have azure>={1} installed? Try `pip install 'azure>={1}' --upgrade`"
"- {0}".format(HAS_AZURE_EXC, AZURE_MIN_RELEASE))
self._network_client = None
self._storage_client = None
@ -146,7 +164,7 @@ class AzureRMModuleBase(object):
self._compute_client = None
self.check_mode = self.module.check_mode
self.facts_module = facts_module
self.debug = self.module.params.get('debug')
# self.debug = self.module.params.get('debug')
# authenticate
self.credentials = self._get_credentials(self.module.params)
@ -178,6 +196,13 @@ class AzureRMModuleBase(object):
res = self.exec_module(**self.module.params)
self.module.exit_json(**res)
def check_client_version(self, client_name, client_version, expected_version):
# Ensure Azure modules are at least 2.0.0rc5.
if Version(client_version) < Version(expected_version):
self.fail("Installed {0} client version is {1}. The supported version is {2}. Try "
"`pip install azure>={3} --upgrade`".format(client_name, client_version, expected_version,
AZURE_MIN_RELEASE))
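The switch to packaging.version.Version matters because of PEP 440 ordering: prereleases sort before their final release, the case LooseVersion mishandles (e.g. 0.30.0 vs 0.30.0rc6). A quick check:

from packaging.version import Version

assert Version('0.30.0rc5') < Version('0.30.0')
assert Version('2.0.0rc5') < Version('2.0.0')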
def exec_module(self, **kwargs):
self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__))
@ -194,12 +219,12 @@ class AzureRMModuleBase(object):
def log(self, msg, pretty_print=False):
pass
# Use only during module development
# if self.debug:
# log_file = open('azure_rm.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, indent=4, sort_keys=True))
# else:
# log_file.write(msg + u'\n')
#if self.debug:
# log_file = open('azure_rm.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, indent=4, sort_keys=True))
# else:
# log_file.write(msg + u'\n')
def validate_tags(self, tags):
'''
@ -215,52 +240,6 @@ class AzureRMModuleBase(object):
if not isinstance(value, str):
self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value)))
def _tag_purge(self, tags):
'''
Remove metadata tags not found in user provided tags parameter. Returns tuple
with bool indicating something changed and dict of new tags to be assigned to
the object.
:param tags: object metadata tags
:return: bool, dict of tags
'''
if not self.module.params.get('tags'):
# purge all tags
return True, dict()
new_tags = copy.copy(tags)
changed = False
for key in tags:
if not self.module.params['tags'].get(key):
# key not found in user provided parameters
new_tags.pop(key)
changed = True
if changed:
self.log('CHANGED: purged tags')
return changed, new_tags
def _tag_update(self, tags):
'''
Update metadata tags with values in user provided tags parameter. Returns
tuple with bool indicating something changed and dict of new tags to be
assigned to the object.
:param tags: object metadata tags
:return: bool, dict of tags
'''
if isinstance(tags, dict):
new_tags = copy.copy(tags)
else:
new_tags = dict()
changed = False
if self.module.params.get('tags'):
for key, value in self.module.params['tags'].items():
if not (new_tags.get(key) and new_tags[key] == value):
changed = True
new_tags[key] = value
if changed:
self.log('CHANGED: updated tags')
return changed, new_tags
def update_tags(self, tags):
'''
Call from the module to update metadata tags. Returns tuple
@ -270,15 +249,18 @@ class AzureRMModuleBase(object):
:param tags: metadata tags from the object
:return: bool, dict
'''
new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
changed = False
updated, new_tags = self._tag_update(tags)
if updated:
changed = True
if not self.module.params['append_tags']:
purged, new_tags = self._tag_purge(new_tags)
if purged:
changed = True
if isinstance(self.module.params.get('tags'), dict):
for key, value in self.module.params['tags'].iteritems():
if not new_tags.get(key) or new_tags[key] != value:
changed = True
new_tags[key] = value
if isinstance(tags, dict):
for key, value in tags.iteritems():
if not self.module.params['tags'].get(key):
new_tags.pop(key)
changed = True
return changed, new_tags
def has_tags(self, obj_tags, tag_list):
@ -323,7 +305,7 @@ class AzureRMModuleBase(object):
return self.rm_client.resource_groups.get(resource_group)
except CloudError:
self.fail("Parameter error: resource group {0} not found".format(resource_group))
except Exception, exc:
except Exception as exc:
self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc)))
def _get_profile(self, profile="default"):
@ -331,7 +313,7 @@ class AzureRMModuleBase(object):
try:
config = ConfigParser.ConfigParser()
config.read(path)
except Exception, exc:
except Exception as exc:
self.fail("Failed to access {0}. Check that the file exists and you have read "
"access. {1}".format(path, str(exc)))
credentials = dict()
@ -341,21 +323,21 @@ class AzureRMModuleBase(object):
except:
pass
if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
if credentials.get('subscription_id'):
return credentials
return None
def _get_env_credentials(self):
env_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
env_credentials[attribute] = os.environ.get(env_variable, None)
if env_credentials['profile'] is not None:
if env_credentials['profile']:
credentials = self._get_profile(env_credentials['profile'])
return credentials
if env_credentials['client_id'] is not None:
if env_credentials.get('subscription_id') is not None:
return env_credentials
return None
@ -367,7 +349,7 @@ class AzureRMModuleBase(object):
self.log('Getting credentials')
arg_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
arg_credentials[attribute] = params.get(attribute, None)
# try module params
@ -376,7 +358,7 @@ class AzureRMModuleBase(object):
credentials = self._get_profile(arg_credentials['profile'])
return credentials
if arg_credentials['client_id'] is not None:
if arg_credentials['subscription_id']:
self.log('Received credentials from parameters.')
return arg_credentials
@ -394,18 +376,27 @@ class AzureRMModuleBase(object):
return None
def serialize_obj(self, obj, class_name):
def serialize_obj(self, obj, class_name, enum_modules=[]):
'''
Return a JSON representation of an Azure object.
:param obj: Azure object
:param class_name: Name of the object's class
:param enum_modules: List of module names to build enum dependencies from.
:return: serialized result
'''
serializer = Serializer()
dependencies = dict()
if enum_modules:
for module_name in enum_modules:
mod = importlib.import_module(module_name)
for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):
dependencies[mod_class_name] = mod_class_obj
self.log("dependencies: ");
self.log(str(dependencies))
serializer = Serializer(classes=dependencies)
return serializer.body(obj, class_name)
def get_poller_result(self, poller, wait=20):
def get_poller_result(self, poller, wait=5):
'''
Consistent method of waiting on and retrieving results from Azure's long poller
@ -418,7 +409,7 @@ class AzureRMModuleBase(object):
self.log("Waiting for {0} sec".format(delay))
poller.wait(timeout=delay)
return poller.result()
except Exception, exc:
except Exception as exc:
self.log(str(exc))
raise
@ -463,15 +454,13 @@ class AzureRMModuleBase(object):
# Get keys from the storage account
self.log('Getting keys')
account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
keys['key1'] = account_keys.key1
keys['key2'] = account_keys.key2
except Exception, exc:
except Exception as exc:
self.fail("Error getting keys for account {0} - {1}".format(storage_account_name, str(exc)))
try:
self.log('Create blob service')
return CloudStorageAccount(storage_account_name, keys['key1']).create_block_blob_service()
except Exception, exc:
return CloudStorageAccount(storage_account_name, account_keys.keys[0].value).create_block_blob_service()
except Exception as exc:
self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name,
str(exc)))
@ -508,7 +497,7 @@ class AzureRMModuleBase(object):
self.log('Creating default public IP {0}'.format(public_ip_name))
try:
poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params)
except Exception, exc:
except Exception as exc:
self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc)))
return self.get_poller_result(poller)
@ -578,7 +567,7 @@ class AzureRMModuleBase(object):
poller = self.network_client.network_security_groups.create_or_update(resource_group,
security_group_name,
parameters)
except Exception, exc:
except Exception as exc:
self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc)))
return self.get_poller_result(poller)
@ -589,16 +578,15 @@ class AzureRMModuleBase(object):
# time we attempt to use the requested client.
resource_client = self.rm_client
resource_client.providers.register(key)
except Exception, exc:
except Exception as exc:
self.fail("One-time registration of {0} failed - {1}".format(key, str(exc)))
@property
def storage_client(self):
self.log('Getting storage client...')
if not self._storage_client:
config = StorageManagementClientConfiguration(self.azure_credentials, self.subscription_id)
config.add_user_agent(ANSIBLE_USER_AGENT)
self._storage_client = StorageManagementClient(config)
self.check_client_version('storage', storage_client_version, AZURE_EXPECTED_VERSIONS['storage_client_version'])
self._storage_client = StorageManagementClient(self.azure_credentials, self.subscription_id)
self._register('Microsoft.Storage')
return self._storage_client
@ -606,9 +594,8 @@ class AzureRMModuleBase(object):
def network_client(self):
self.log('Getting network client')
if not self._network_client:
config = NetworkManagementClientConfiguration(self.azure_credentials, self.subscription_id)
config.add_user_agent(ANSIBLE_USER_AGENT)
self._network_client = NetworkManagementClient(config)
self.check_client_version('network', network_client_version, AZURE_EXPECTED_VERSIONS['network_client_version'])
self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id)
self._register('Microsoft.Network')
return self._network_client
@ -616,17 +603,15 @@ class AzureRMModuleBase(object):
def rm_client(self):
self.log('Getting resource manager client')
if not self._resource_client:
config = ResourceManagementClientConfiguration(self.azure_credentials, self.subscription_id)
config.add_user_agent(ANSIBLE_USER_AGENT)
self._resource_client = ResourceManagementClient(config)
self.check_client_version('resource', resource_client_version, AZURE_EXPECTED_VERSIONS['resource_client_version'])
self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id)
return self._resource_client
@property
def compute_client(self):
self.log('Getting compute client')
if not self._compute_client:
config = ComputeManagementClientConfiguration(self.azure_credentials, self.subscription_id)
config.add_user_agent(ANSIBLE_USER_AGENT)
self._compute_client = ComputeManagementClient(config)
self.check_client_version('compute', compute_client_version, AZURE_EXPECTED_VERSIONS['compute_client_version'])
self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id)
self._register('Microsoft.Compute')
return self._compute_client

View file

@ -27,8 +27,8 @@
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True]
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False]
BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 'True', 1, True]
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 'False', 0, False]
BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
# ansible modules can be written in any language. To simplify
@ -136,10 +136,10 @@ except ImportError:
try:
import simplejson as json
except ImportError:
print('{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
HAVE_SELINUX=False
@ -178,6 +178,8 @@ except ImportError:
except ImportError:
pass
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
try:
from ast import literal_eval
except ImportError:
@ -219,14 +221,10 @@ except ImportError:
_literal_eval = literal_eval
from ansible import __version__
# Backwards compat. New code should just import and use __version__
ANSIBLE_VERSION = __version__
# Internal global holding passed in params and constants. This is consulted
# in case multiple AnsibleModules are created. Otherwise each AnsibleModule
# would attempt to read from stdin. Other code should not use this directly
# as it is an internal implementation detail
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS=dict(
@ -524,9 +522,59 @@ def is_executable(path):
or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
class AnsibleFallbackNotFound(Exception):
pass
def _load_params():
''' read the module's parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
want to process the parameters that are being handed to the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if sys.version_info >= (3,):
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if sys.version_info < (3,):
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
# This helper used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if sys.version_info < (3,):
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
def env_fallback(*args, **kwargs):
''' Load value from environment '''
@ -536,6 +584,23 @@ def env_fallback(*args, **kwargs):
else:
raise AnsibleFallbackNotFound
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
class AnsibleFallbackNotFound(Exception):
pass
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
@ -562,7 +627,7 @@ class AnsibleModule(object):
self.run_command_environ_update = {}
self.aliases = {}
self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity']
self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity', '_ansible_selinux_special_fs', '_ansible_version', '_ansible_syslog_facility']
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
@ -578,7 +643,7 @@ class AnsibleModule(object):
except Exception:
e = get_exception()
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('{"failed": true, "msg": "Module alias error: %s"}' % str(e))
print('\n{"failed": true, "msg": "Module alias error: %s"}' % str(e))
sys.exit(1)
# Save parameter values that should never be logged
@ -613,6 +678,7 @@ class AnsibleModule(object):
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
}
if not bypass_checks:
self._check_required_arguments()
@ -764,7 +830,7 @@ class AnsibleModule(object):
return (uid, gid)
def find_mount_point(self, path):
path = os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
path = os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
@ -785,7 +851,7 @@ class AnsibleModule(object):
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self.constants['SELINUX_SPECIAL_FS']:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
@ -1178,7 +1244,8 @@ class AnsibleModule(object):
return aliases_results
def _check_arguments(self, check_invalid_arguments):
for (k,v) in self.params.items():
self._syslog_facility = 'LOG_USER'
for (k,v) in list(self.params.items()):
if k == '_ansible_check_mode' and v:
if not self.supports_check_mode:
@ -1197,6 +1264,15 @@ class AnsibleModule(object):
elif k == '_ansible_verbosity':
self._verbosity = v
elif k == '_ansible_selinux_special_fs':
self._selinux_special_fs = v
elif k == '_ansible_syslog_facility':
self._syslog_facility = v
elif k == '_ansible_version':
self.ansible_version = v
elif check_invalid_arguments and k not in self._legal_inputs:
self.fail_json(msg="unsupported parameter for module: %s" % k)
@ -1270,9 +1346,28 @@ class AnsibleModule(object):
if type(choices) == list:
if k in self.params:
if self.params[k] not in choices:
choices_str=",".join([str(c) for c in choices])
msg="value of %s must be one of: %s, got: %s" % (k, choices_str, self.params[k])
self.fail_json(msg=msg)
# PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking the value. If we can't figure this out, the module author is responsible.
lowered_choices = None
if self.params[k] == 'False':
lowered_choices = _lenient_lowercase(choices)
FALSEY = frozenset(BOOLEANS_FALSE)
overlap = FALSEY.intersection(choices)
if len(overlap) == 1:
# Extract from a set
(self.params[k],) = overlap
if self.params[k] == 'True':
if lowered_choices is None:
lowered_choices = _lenient_lowercase(choices)
TRUTHY = frozenset(BOOLEANS_TRUE)
overlap = TRUTHY.intersection(choices)
if len(overlap) == 1:
(self.params[k],) = overlap
if self.params[k] not in choices:
choices_str=",".join([str(c) for c in choices])
msg="value of %s must be one of: %s, got: %s" % (k, choices_str, self.params[k])
self.fail_json(msg=msg)
else:
self.fail_json(msg="internal error: do not know how to interpret argument_spec")
@ -1390,7 +1485,7 @@ class AnsibleModule(object):
if isinstance(value, float):
return value
if isinstance(value, basestring):
if isinstance(value, (bytes, unicode, int)):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
@ -1399,6 +1494,16 @@ class AnsibleModule(object):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
# Return a jsonified string. Sometimes the controller turns a json
# string into a dict/list so transform it back into json here
if isinstance(value, (unicode, bytes)):
return value.strip()
else:
if isinstance(value, (list, tuple, dict)):
return json.dumps(value)
raise TypeError('%s cannot be converted to a json string' % type(value))
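The jsonarg round-trip in standalone form (the original also handles py2's unicode/bytes; this sketch uses py3 str): strings pass through trimmed, while containers the controller already deserialized are re-serialized into a JSON string.

import json

def check_type_jsonarg(value):
    if isinstance(value, str):
        return value.strip()
    if isinstance(value, (list, tuple, dict)):
        return json.dumps(value)
    raise TypeError('%s cannot be converted to a json string' % type(value))

print(check_type_jsonarg({'a': 1}))      # {"a": 1}
print(check_type_jsonarg(' {"a": 1} '))  # {"a": 1}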
def _check_type_raw(self, value):
return value
@ -1460,54 +1565,18 @@ class AnsibleModule(object):
continue
def _load_params(self):
''' read the input and set the params attribute. Sets the constants as well.'''
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if sys.version_info >= (3,):
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if sys.version_info < (3,):
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
# This helper used too early for fail_json to work.
print('{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if sys.version_info < (3,):
params = json_dict_unicode_to_bytes(params)
try:
self.params = params['ANSIBLE_MODULE_ARGS']
self.constants = params['ANSIBLE_MODULE_CONSTANTS']
except KeyError:
# This helper used too early for fail_json to work.
print('{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS and ANSIBLE_MODULE_CONSTANTS in json data from stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % os.path.basename(__file__)
facility = getattr(syslog, self.constants.get('SYSLOG_FACILITY', 'LOG_USER'), syslog.LOG_USER)
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
@ -1560,16 +1629,17 @@ class AnsibleModule(object):
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
passwd_keys = ['password', 'login_password']
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
arg_type = arg_opts.get('type', 'str')
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
elif param in passwd_keys:
# try to capture all password/passphrase-named fields
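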
elif arg_type != 'bool' and PASSWORD_MATCH.search(param):
log_args[param] = 'NOT_LOGGING_PASSWORD'
else:
param_val = self.params[param]
@ -1693,7 +1763,7 @@ class AnsibleModule(object):
kwargs['invocation'] = {'module_args': self.params}
kwargs = remove_values(kwargs, self.no_log_values)
self.do_cleanup_files()
print(self.jsonify(kwargs))
print('\n%s' % self.jsonify(kwargs))
sys.exit(0)
def fail_json(self, **kwargs):
@ -1705,7 +1775,7 @@ class AnsibleModule(object):
kwargs['invocation'] = {'module_args': self.params}
kwargs = remove_values(kwargs, self.no_log_values)
self.do_cleanup_files()
print(self.jsonify(kwargs))
print('\n%s' % self.jsonify(kwargs))
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
@ -1739,7 +1809,7 @@ class AnsibleModule(object):
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(filename, 'rb')
infile = open(os.path.realpath(filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
@ -1975,6 +2045,21 @@ class AnsibleModule(object):
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module and explode, the remote lib path will resemble ...
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system ...
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ziploader
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths \
if not x.endswith('/ansible_modlib.zip') \
and not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
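The same scrub in standalone form, with the empty-result case handled by removing the variable entirely (a sketch; the ziploader suffixes match the comments above):

import os

pypaths = os.environ.get('PYTHONPATH', '').split(':')
pypaths = [p for p in pypaths
           if p and not p.endswith('/ansible_modlib.zip')
           and not p.endswith('/debug_dir')]
if pypaths:
    os.environ['PYTHONPATH'] = ':'.join(pypaths)
else:
    os.environ.pop('PYTHONPATH', None)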
# create a printable version of the command for use
# in reporting later, which strips out things like
# passwords from the args list
@ -2016,7 +2101,6 @@ class AnsibleModule(object):
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=os.environ,
)
if cwd and os.path.isdir(cwd):

View file

@ -93,6 +93,9 @@ class AnsibleCloudStack(object):
# these keys will be compared case-sensitively in self.has_changed()
self.case_sensitive_keys = [
'id',
'displaytext',
'displayname',
'description',
]
self.module = module
@ -154,12 +157,27 @@ class AnsibleCloudStack(object):
continue
if key in current_dict:
if self.case_sensitive_keys and key in self.case_sensitive_keys:
if str(value) != str(current_dict[key]):
if isinstance(value, (int, float, long, complex)):
# ensure we compare the same type
if isinstance(value, int):
current_dict[key] = int(current_dict[key])
elif isinstance(value, float):
current_dict[key] = float(current_dict[key])
elif isinstance(value, long):
current_dict[key] = long(current_dict[key])
elif isinstance(value, complex):
current_dict[key] = complex(current_dict[key])
if value != current_dict[key]:
return True
else:
if self.case_sensitive_keys and key in self.case_sensitive_keys:
if value != current_dict[key].encode('utf-8'):
return True
# Test for diff in case insensitive way
elif value.lower() != current_dict[key].encode('utf-8').lower():
return True
# Test for diff in case insensitive way
elif str(value).lower() != str(current_dict[key]).lower():
return True
else:
return True
return False
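A simplified sketch of the type-coercing comparison this hunk introduces (it omits py2's long/complex and the utf-8 encode): API responses come back as strings, so cast to the desired value's type before comparing, and compare strings case-insensitively.

def value_changed(value, current):
    if isinstance(value, (int, float)):
        return value != type(value)(current)
    return str(value).lower() != str(current).lower()

print(value_changed(8080, '8080'))  # False
print(value_changed('Web', 'web'))  # False
print(value_changed('a', 'b'))      # True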
@ -245,6 +263,9 @@ class AnsibleCloudStack(object):
zone = self.module.params.get('zone')
zones = self.cs.listZones()
if not zones:
self.module.fail_json(msg="No zones available. Please create a zone first")
# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]

View file

@ -22,6 +22,7 @@ import json
import sys
import copy
from distutils.version import LooseVersion
from urlparse import urlparse
from ansible.module_utils.basic import *
@ -37,7 +38,7 @@ try:
from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION
from docker.utils.types import Ulimit, LogConfig
from docker import auth
except ImportError, exc:
except ImportError as exc:
HAS_DOCKER_ERROR = str(exc)
HAS_DOCKER_PY = False
@ -151,7 +152,7 @@ class AnsibleDockerClient(Client):
if not HAS_DOCKER_PY:
self.fail("Failed to import docker-py - %s. Try `pip install docker-py`" % HAS_DOCKER_ERROR)
if docker_version < MIN_DOCKER_VERSION:
if LooseVersion(docker_version) < LooseVersion(MIN_DOCKER_VERSION):
self.fail("Error: docker-py version is %s. Minimum version required is %s." % (docker_version,
MIN_DOCKER_VERSION))
@ -161,9 +162,9 @@ class AnsibleDockerClient(Client):
try:
super(AnsibleDockerClient, self).__init__(**self._connect_params)
except APIError, exc:
except APIError as exc:
self.fail("Docker API error: %s" % exc)
except Exception, exc:
except Exception as exc:
self.fail("Error connecting: %s" % exc)
def log(self, msg, pretty_print=False):
@ -233,7 +234,7 @@ class AnsibleDockerClient(Client):
tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
'DOCKER_TLS_HOSTNAME', 'localhost'),
api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
DEFAULT_DOCKER_API_VERSION),
'auto'),
cacert_path=self._get_value('cacert_path', params['cacert_path'], 'DOCKER_CERT_PATH', None),
cert_path=self._get_value('cert_path', params['cert_path'], 'DOCKER_CERT_PATH', None),
key_path=self._get_value('key_path', params['key_path'], 'DOCKER_CERT_PATH', None),
@ -262,7 +263,7 @@ class AnsibleDockerClient(Client):
try:
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError, exc:
except TLSParameterError as exc:
self.fail("TLS config error: %s" % exc)
def _get_connect_params(self):
@ -372,9 +373,9 @@ class AnsibleDockerClient(Client):
if container['Id'] == name:
result = container
break
except SSLError, exc:
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception, exc:
except Exception as exc:
self.fail("Error retrieving container list: %s" % exc)
if result is not None:
@ -382,7 +383,7 @@ class AnsibleDockerClient(Client):
self.log("Inspecting container Id %s" % result['Id'])
result = self.inspect_container(container=result['Id'])
self.log("Completed container inspection")
except Exception, exc:
except Exception as exc:
self.fail("Error inspecting container: %s" % exc)
return result
@ -411,7 +412,7 @@ class AnsibleDockerClient(Client):
if len(images) == 1:
try:
inspection = self.inspect_image(images[0]['Id'])
except Exception, exc:
except Exception as exc:
self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
return inspection
@ -431,9 +432,10 @@ class AnsibleDockerClient(Client):
images = response
if tag:
lookup = "%s:%s" % (name, tag)
images = []
for image in response:
self.log(image, pretty_print=True)
if image.get('RepoTags') and lookup in image.get('RepoTags'):
tags = image.get('RepoTags')
if tags and lookup in tags:
images = [image]
break
return images
@ -444,8 +446,7 @@ class AnsibleDockerClient(Client):
'''
self.log("Pulling image %s:%s" % (name, tag))
try:
for line in self.pull(name, tag=tag, stream=True):
line = json.loads(line)
for line in self.pull(name, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('error'):
if line.get('errorDetail'):
@ -455,7 +456,7 @@ class AnsibleDockerClient(Client):
error_detail.get('message')))
else:
self.fail("Error pulling %s - %s" % (name, line.get('error')))
except Exception, exc:
except Exception as exc:
self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
return self.find_image(name, tag)
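A hedged usage sketch of the decode=True change: docker-py yields parsed dicts per progress line, so no manual json.loads is needed (assumes the pre-2.0 docker-py Client and a reachable daemon; the image name is illustrative):

from docker import Client  # docker-py pre-2.0, as used by this snippet

client = Client(base_url='unix://var/run/docker.sock')
for line in client.pull('busybox', tag='latest', stream=True, decode=True):
    if line.get('error'):
        detail = line.get('errorDetail') or {}
        raise RuntimeError(detail.get('message', line['error']))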

View file

@ -1,20 +1,29 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# This file is part of Ansible
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re

View file

@ -31,7 +31,13 @@ import struct
import datetime
import getpass
import pwd
import ConfigParser
try:
# python2
import ConfigParser as configparser
except ImportError:
# python3
import configparser
from ansible.module_utils.basic import get_all_subclasses
# py2 vs py3; replace with six via ziploader
@ -40,7 +46,12 @@ try:
except ImportError:
from io import StringIO
from string import maketrans
try:
# python2
from string import maketrans
except ImportError:
# python3
maketrans = str.maketrans
try:
import selinux
@ -147,6 +158,7 @@ class Facts(object):
{ 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
{ 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
{ 'path' : '/bin/opkg', 'name' : 'opkg' },
{ 'path' : '/usr/pkg/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/port', 'name' : 'macports' },
{ 'path' : '/usr/local/bin/brew', 'name' : 'homebrew' },
@ -260,10 +272,10 @@ class Facts(object):
fact = json.loads(out)
except ValueError:
# load raw ini
cp = ConfigParser.ConfigParser()
cp = configparser.ConfigParser()
try:
cp.readfp(StringIO(out))
except ConfigParser.Error:
except configparser.Error:
fact = "error loading fact - please check content"
else:
fact = {}
@ -1127,6 +1139,8 @@ class LinuxHardware(Hardware):
mtab = get_file_content('/etc/mtab', '')
for line in mtab.split('\n'):
fields = line.rstrip('\n').split()
if len(fields) < 4:
continue
if fields[0].startswith('/') or ':/' in fields[0]:
if(fields[2] != 'none'):
size_total, size_available = self._get_mount_size_facts(fields[1])
@ -1176,7 +1190,8 @@ class LinuxHardware(Hardware):
sysfs_no_links = 0
try:
path = os.readlink(os.path.join("/sys/block/", block))
except OSError, e:
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.EINVAL:
path = block
sysfs_no_links = 1
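
The import-compat idiom these facts.py hunks adopt can be exercised standalone. A minimal sketch, assuming only the standard library (the translate call at the end is illustrative):

try:
    # python2
    import ConfigParser as configparser
except ImportError:
    # python3
    import configparser

try:
    # python2
    from string import maketrans
except ImportError:
    # python3
    maketrans = str.maketrans

# The same two-argument call now works on either interpreter.
table = maketrans('ab', 'xy')
print('abc'.translate(table))  # -> xyc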


@ -27,18 +27,29 @@
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
import os
import traceback
from distutils.version import LooseVersion
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
try:
from libcloud.compute.types import Provider
import libcloud
from libcloud.compute.providers import get_driver
HAS_LIBCLOUD_BASE = True
except ImportError:
HAS_LIBCLOUD_BASE = False
USER_AGENT_PRODUCT="Ansible-gce"
USER_AGENT_VERSION="v1"
def gce_connect(module, provider=None):
"""Return a Google Cloud Engine connection."""
if not HAS_LIBCLOUD_BASE:
module.fail_json(msg='libcloud must be installed to use this module')
service_account_email = module.params.get('service_account_email', None)
credentials_file = module.params.get('credentials_file', None)
pem_file = module.params.get('pem_file', None)
project_id = module.params.get('project_id', None)
@ -50,6 +61,8 @@ def gce_connect(module, provider=None):
project_id = os.environ.get('GCE_PROJECT', None)
if not pem_file:
pem_file = os.environ.get('GCE_PEM_FILE_PATH', None)
if not credentials_file:
credentials_file = os.environ.get('GCE_CREDENTIALS_FILE_PATH', pem_file)
# If we still don't have one or more of our credentials, attempt to
# get the remaining values from the libcloud secrets file.
@ -62,25 +75,41 @@ def gce_connect(module, provider=None):
if hasattr(secrets, 'GCE_PARAMS'):
if not service_account_email:
service_account_email = secrets.GCE_PARAMS[0]
if not pem_file:
pem_file = secrets.GCE_PARAMS[1]
if not credentials_file:
credentials_file = secrets.GCE_PARAMS[1]
keyword_params = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
if not project_id:
project_id = keyword_params.get('project', None)
# If we *still* don't have the credentials we need, then it's time to
# just fail out.
if service_account_email is None or pem_file is None or project_id is None:
if service_account_email is None or credentials_file is None or project_id is None:
module.fail_json(msg='Missing GCE connection parameters in libcloud '
'secrets file.')
return None
else:
# We have credentials but let's make sure that if they are JSON we have the minimum
# libcloud requirement met
try:
# Try to read credentials as JSON
with open(credentials_file) as credentials:
json.loads(credentials.read())
# If the credentials are proper JSON and we do not have the minimum
# required libcloud version, bail out and return a descriptive error
if LooseVersion(libcloud.__version__) < '0.17.0':
module.fail_json(msg='Using JSON credentials but libcloud minimum version not met. '
'Upgrade to libcloud>=0.17.0.')
return None
except ValueError, e:
# Not JSON
pass
# Allow for passing in libcloud Google DNS (e.g., Provider.GOOGLE)
if provider is None:
provider = Provider.GCE
try:
gce = get_driver(provider)(service_account_email, pem_file,
gce = get_driver(provider)(service_account_email, credentials_file,
datacenter=module.params.get('zone', None),
project=project_id)
gce.connection.user_agent_append("%s/%s" % (

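The JSON-credential gate added above can be isolated as follows. This is a hedged sketch: fail stands in for module.fail_json, and the libcloud version is passed in rather than read from the package.

import json
from distutils.version import LooseVersion

def check_json_credentials(credentials_file, libcloud_version, fail):
    """If the credentials file parses as JSON, enforce the libcloud minimum."""
    try:
        with open(credentials_file) as credentials:
            json.loads(credentials.read())
    except ValueError:
        # Not JSON (e.g. a PEM key file): no version constraint applies.
        return
    if LooseVersion(libcloud_version) < LooseVersion('0.17.0'):
        fail('Using JSON credentials but libcloud minimum version not met. '
             'Upgrade to libcloud>=0.17.0.')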

@ -77,9 +77,14 @@ class Cli(object):
key_filename = self.module.params['ssh_keyfile']
timeout = self.module.params['timeout']
allow_agent = (key_filename is not None) or (key_filename is None and password is None)
try:
self.shell = Shell(kickstart=False, prompts_re=CLI_PROMPTS_RE, errors_re=CLI_ERRORS_RE)
self.shell.open(host, port=port, username=username, password=password, key_filename=key_filename, timeout=timeout)
self.shell = Shell(kickstart=False, prompts_re=CLI_PROMPTS_RE,
errors_re=CLI_ERRORS_RE)
self.shell.open(host, port=port, username=username,
password=password, key_filename=key_filename,
allow_agent=allow_agent, timeout=timeout)
except ShellError:
e = get_exception()
msg = 'failed to connect to %s:%s - %s' % (host, port, str(e))
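
The allow_agent expression that recurs in these CLI hunks is worth isolating: the SSH agent is consulted when a key file is supplied, or when neither a key nor a password is given. A standalone check:

def derive_allow_agent(key_filename, password):
    return (key_filename is not None) or (key_filename is None and password is None)

assert derive_allow_agent('~/.ssh/id_rsa', None) is True   # explicit key
assert derive_allow_agent(None, None) is True              # no credentials: try the agent
assert derive_allow_agent(None, 's3cret') is False         # password auth only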


@ -1,20 +1,29 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# This file is part of Ansible
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re


@ -93,9 +93,12 @@ class Cli(object):
password = self.module.params['password']
key_filename = self.module.params['ssh_keyfile']
allow_agent = (key_filename is not None) or (key_filename is None and password is None)
try:
self.shell = Shell()
self.shell.open(host, port=port, username=username, password=password, key_filename=key_filename)
self.shell.open(host, port=port, username=username, password=password,
key_filename=key_filename, allow_agent=allow_agent)
except ShellError:
e = get_exception()
msg = 'failed to connect to %s:%s - %s' % (host, port, str(e))
@ -152,9 +155,10 @@ class Netconf(object):
user = self.module.params['username']
passwd = self.module.params['password']
key_filename = self.module.params['ssh_keyfile']
self.device = Device(host, user=user, passwd=passwd, port=port,
gather_facts=False).open()
gather_facts=False, ssh_private_key_file=key_filename).open()
self.config = Config(self.device)
@ -350,6 +354,8 @@ def get_module(**kwargs):
module.fail_json(msg='paramiko is required but does not appear to be installed')
elif module.params['transport'] == 'netconf' and not HAS_PYEZ:
module.fail_json(msg='junos-eznc >= 1.2.2 is required but does not appear to be installed')
elif module.params['transport'] == 'netconf' and not HAS_JXMLEASE:
module.fail_json(msg='jxmlease is required but does not appear to be installed')
module.connect()
return module


@ -1,24 +1,32 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# This file is part of Ansible
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
import collections
import itertools
import shlex
@ -109,10 +117,9 @@ class NetworkConfig(object):
return self._config
def __str__(self):
config = collections.OrderedDict()
for item in self._config:
self.expand(item, config)
return '\n'.join(self.flatten(config))
if self._device_os == 'junos':
return self.to_lines(self.expand(self.items))
return self.to_block(self.expand(self.items))
def load(self, contents):
self._config = parse(contents, indent=self.indent)
@ -154,26 +161,29 @@ class NetworkConfig(object):
regexp = r'%s' % regexp
return re.findall(regexp, str(self))
def expand(self, obj, items):
block = [item.raw for item in obj.parents]
block.append(obj.raw)
def to_lines(self, section):
lines = list()
for entry in section[1:]:
line = ['set']
line.extend([p.text for p in entry.parents])
line.append(entry.text)
lines.append(' '.join(line))
return lines
current_level = items
for b in block:
if b not in current_level:
current_level[b] = collections.OrderedDict()
current_level = current_level[b]
for c in obj.children:
if c.raw not in current_level:
current_level[c.raw] = collections.OrderedDict()
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def flatten(self, data, obj=None):
if obj is None:
obj = list()
for k, v in data.items():
obj.append(k)
self.flatten(v, obj)
return obj
def expand(self, objs):
visited = set()
expanded = list()
for o in objs:
for p in o.parents:
if p not in visited:
visited.add(p)
expanded.append(p)
expanded.append(o)
visited.add(o)
return expanded
def get_object(self, path):
for item in self.items:
@ -229,13 +239,20 @@ class NetworkConfig(object):
if self._device_os == 'junos':
return updates
diffs = dict()
changes = list()
for update in updates:
if replace == 'block' and update.parents:
update = update.parents[-1]
self.expand(update, diffs)
if replace == 'block':
if update.parents:
changes.append(update.parents[-1])
for child in update.parents[-1].children:
changes.append(child)
else:
changes.append(update)
else:
changes.append(update)
updates = self.expand(changes)
return self.flatten(diffs)
return [item.text for item in self.expand(updates)]
def _build_children(self, children, parents=None, offset=0):
for item in children:
@ -259,6 +276,8 @@ class NetworkConfig(object):
config.append(line)
if parent:
parent.children.append(line)
if parent.parents:
line.parents.append(*parent.parents)
line.parents.append(parent)
parent = line
offset += self.indent
@ -382,7 +401,7 @@ class Conditional(object):
return self.number(value) <= self.value
def contains(self, value):
return self.value in value
return str(self.value) in value
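
The one-character change to Conditional.contains() above guards against a TypeError: testing an int against a string with the in operator raises on both Python 2 and 3, so the value is stringified first. Illustratively:

def contains(value, haystack):
    return str(value) in haystack

assert contains(80, 'port 80 open')       # would raise TypeError without str()
assert not contains(443, 'port 80 open')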


@ -35,7 +35,8 @@ NET_COMMON_ARGS = dict(
transport=dict(default='cli', choices=['cli', 'nxapi']),
use_ssl=dict(default=False, type='bool'),
validate_certs=dict(default=True, type='bool'),
provider=dict(type='dict')
provider=dict(type='dict'),
timeout=dict(default=10, type='int')
)
NXAPI_COMMAND_TYPES = ['cli_show', 'cli_show_ascii', 'cli_conf', 'bash']
@ -168,11 +169,17 @@ class Cli(object):
username = self.module.params['username']
password = self.module.params['password']
timeout = self.module.params['timeout']
key_filename = self.module.params['ssh_keyfile']
allow_agent = (key_filename is not None) or (key_filename is None and password is None)
try:
self.shell = Shell(kickstart=False, prompts_re=CLI_PROMPTS_RE, errors_re=CLI_ERRORS_RE)
self.shell.open(host, port=port, username=username, password=password, key_filename=key_filename)
self.shell = Shell(kickstart=False, prompts_re=CLI_PROMPTS_RE,
errors_re=CLI_ERRORS_RE)
self.shell.open(host, port=port, username=username,
password=password, key_filename=key_filename,
allow_agent=allow_agent, timeout=timeout)
except ShellError:
e = get_exception()
msg = 'failed to connect to %s:%s - %s' % (host, port, str(e))


@ -114,7 +114,20 @@ class Rest(object):
if not port:
port = 80
self.baseurl = '%s://%s:%s/rest/v1' % (proto, host, port)
baseurl = '%s://%s:%s' % (proto, host, port)
headers = dict({'Content-Type': 'application/x-www-form-urlencoded'})
# Get a cookie and save it for the rest of the operations.
url = '%s/%s' % (baseurl, 'login')
data = 'username=%s&password=%s' % (self.module.params['username'],
self.module.params['password'])
resp, hdrs = fetch_url(self.module, url, data=data,
headers=headers, method='POST')
# Update the base url for the rest of the operations.
self.baseurl = '%s/rest/v1' % (baseurl)
self.headers = dict({'Content-Type': 'application/json',
'Accept': 'application/json',
'Cookie': resp.headers.get('Set-Cookie')})
def _url_builder(self, path):
if path[0] == '/':
@ -127,7 +140,7 @@ class Rest(object):
if headers is None:
headers = dict()
headers.update({'Content-Type': 'application/json'})
headers.update(self.headers)
resp, hdrs = fetch_url(self.module, url, data=data, headers=headers,
method=method)
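
The session bootstrap added to Rest.__init__ reduces to: POST the form-encoded credentials to /login once, keep the returned Set-Cookie value, and send it with every later JSON request. A hedged sketch in which post stands in for Ansible's fetch_url:

def build_session(post, proto, host, port, username, password):
    """Log in once, then reuse the session cookie for all later requests."""
    baseurl = '%s://%s:%s' % (proto, host, port)
    login_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    data = 'username=%s&password=%s' % (username, password)
    resp = post('%s/login' % baseurl, data=data, headers=login_headers)
    json_headers = {'Content-Type': 'application/json',
                    'Accept': 'application/json',
                    'Cookie': resp.headers.get('Set-Cookie')}
    return '%s/rest/v1' % baseurl, json_headers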


@ -32,7 +32,6 @@ import os
import re
from uuid import UUID
from ansible import __version__
from ansible.module_utils.basic import BOOLEANS
FINAL_STATUSES = ('ACTIVE', 'ERROR')
@ -264,7 +263,7 @@ def rax_required_together():
def setup_rax_module(module, rax_module, region_required=True):
"""Set up pyrax in a standard way for all modules"""
rax_module.USER_AGENT = 'ansible/%s %s' % (__version__,
rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version,
rax_module.USER_AGENT)
api_key = module.params.get('api_key')


@ -27,6 +27,7 @@ except ImportError:
try:
import paramiko
from paramiko.ssh_exception import AuthenticationException
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
@ -101,12 +102,17 @@ class Shell(object):
if not look_for_keys:
look_for_keys = password is None
self.ssh.connect(host, port=port, username=username, password=password,
timeout=timeout, look_for_keys=look_for_keys, pkey=pkey,
key_filename=key_filename, allow_agent=allow_agent)
try:
self.ssh.connect(host, port=port, username=username, password=password,
timeout=timeout, look_for_keys=look_for_keys, pkey=pkey,
key_filename=key_filename, allow_agent=allow_agent)
self.shell = self.ssh.invoke_shell()
self.shell.settimeout(timeout)
self.shell = self.ssh.invoke_shell()
self.shell.settimeout(timeout)
except socket.gaierror:
raise ShellError("unable to resolve host name")
except AuthenticationException:
raise ShellError('Unable to authenticate to remote device')
if self.kickstart:
self.shell.sendall("\n")


@ -81,7 +81,6 @@
# agrees to be bound by the terms and conditions of this License
# Agreement.
import httplib
import netrc
import os
import re
@ -91,7 +90,13 @@ import platform
import tempfile
import base64
from ansible.module_utils.basic import get_distribution
from ansible.module_utils.basic import get_distribution, get_exception
try:
import httplib
except ImportError:
# Python 3
import http.client as httplib
try:
import urllib2
@ -626,6 +631,13 @@ class SSLValidationHandler(urllib2.BaseHandler):
use_proxy = self.detect_no_proxy(req.get_full_url())
if not use_proxy:
try:
# cleanup the temp file created, don't worry
# if it fails for some reason
os.remove(tmp_ca_cert_path)
except:
pass
# ignore proxy settings for this host request
return req
@ -664,7 +676,8 @@ class SSLValidationHandler(urllib2.BaseHandler):
# close the ssl connection
#ssl_s.unwrap()
s.close()
except (ssl.SSLError, socket.error), e:
except (ssl.SSLError, socket.error):
e = get_exception()
# fail if we tried all of the certs but none worked
if 'connection refused' in str(e).lower():
raise ConnectionError('Failed to connect to %s:%s.' % (self.hostname, self.port))
@ -887,27 +900,34 @@ def fetch_url(module, url, data=None, headers=None, method=None,
url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth,
follow_redirects=follow_redirects)
info.update(r.info())
info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.getcode()))
except NoSSLError, e:
info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.code))
except NoSSLError:
e = get_exception()
distribution = get_distribution()
if distribution is not None and distribution.lower() == 'redhat':
module.fail_json(msg='%s. You can also install python-ssl from EPEL' % str(e))
else:
module.fail_json(msg='%s' % str(e))
except (ConnectionError, ValueError), e:
except (ConnectionError, ValueError):
e = get_exception()
module.fail_json(msg=str(e))
except urllib2.HTTPError, e:
except urllib2.HTTPError:
e = get_exception()
try:
body = e.read()
except AttributeError:
body = ''
info.update(dict(msg=str(e), status=e.code, body=body, **e.info()))
except urllib2.URLError, e:
info.update(dict(msg=str(e), body=body, **e.info()))
info['status'] = e.code
except urllib2.URLError:
e = get_exception()
code = int(getattr(e, 'code', -1))
info.update(dict(msg="Request failed: %s" % str(e), status=code))
except socket.error, e:
except socket.error:
e = get_exception()
info.update(dict(msg="Connection failure: %s" % str(e), status=-1))
except Exception, e:
except Exception:
e = get_exception()
info.update(dict(msg="An unknown error occurred: %s" % str(e), status=-1))
return r, info
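
All of the except-clause rewrites in this file follow one idiom: except E, e is a SyntaxError on Python 3, so the active exception is re-fetched inside a version-neutral except block. A minimal stand-in for get_exception():

import sys

def get_exception():
    return sys.exc_info()[1]

try:
    raise ValueError('boom')
except ValueError:
    e = get_exception()
    print('caught: %s' % str(e))  # -> caught: boom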


@ -46,7 +46,8 @@ def vca_argument_spec():
api_version=dict(default=DEFAULT_VERSION),
service_type=dict(default=DEFAULT_SERVICE_TYPE, choices=SERVICE_MAP.keys()),
vdc_name=dict(),
gateway_name=dict(default='gateway')
gateway_name=dict(default='gateway'),
verify_certs=dict(type='bool', default=True)
)
class VcaAnsibleModule(AnsibleModule):
@ -110,7 +111,7 @@ class VcaAnsibleModule(AnsibleModule):
def create_instance(self):
service_type = self.params.get('service_type', DEFAULT_SERVICE_TYPE)
if service_type == 'vcd':
if service_type == 'vcd':
host = self.params['host']
else:
host = LOGIN_HOST[service_type]
@ -130,8 +131,12 @@ class VcaAnsibleModule(AnsibleModule):
service_type = self.params['service_type']
password = self.params['password']
if not self.vca.login(password=password):
self.fail('Login to VCA failed', response=self.vca.response.content)
login_org = None
if service_type == 'vcd':
login_org = self.params['org']
if not self.vca.login(password=password, org=login_org):
self.fail('Login to VCA failed', response=self.vca.response)
try:
method_name = 'login_%s' % service_type
@ -140,7 +145,7 @@ class VcaAnsibleModule(AnsibleModule):
except AttributeError:
self.fail('no login method exists for service_type %s' % service_type)
except VcaError, e:
self.fail(e.message, response=self.vca.response.content, **e.kwargs)
self.fail(e.message, response=self.vca.response, **e.kwargs)
def login_vca(self):
instance_id = self.params['instance_id']
@ -155,14 +160,14 @@ class VcaAnsibleModule(AnsibleModule):
org = self.params['org']
if not org:
raise VcaError('missing required or for service_type vchs')
raise VcaError('missing required org for service_type vchs')
self.vca.login_to_org(service_id, org)
def login_vcd(self):
org = self.params['org']
if not org:
raise VcaError('missing required or for service_type vchs')
raise VcaError('missing required org for service_type vcd')
if not self.vca.token:
raise VcaError('unable to get token for service_type vcd')

@ -1 +1 @@
Subproject commit 9eb2b557cd08f2a6d381ec0360fa47750146b65a
Subproject commit 29dfc6a5a14d4f9bda21276d04c492149b81b8b8

@ -1 +1 @@
Subproject commit d9caac037cf10f0abaeff1430605387ab011d54f
Subproject commit 72f961ab96e863ad6bd1ca22fbc04563e783c6c5


@ -160,12 +160,13 @@ class DataLoader():
if not file_name or not isinstance(file_name, string_types):
raise AnsibleParserError("Invalid filename: '%s'" % str(file_name))
if not self.path_exists(file_name) or not self.is_file(file_name):
b_file_name = to_bytes(file_name)
if not self.path_exists(b_file_name) or not self.is_file(b_file_name):
raise AnsibleFileNotFound("the file_name '%s' does not exist, or is not readable" % file_name)
show_content = True
try:
with open(file_name, 'rb') as f:
with open(b_file_name, 'rb') as f:
data = f.read()
if self._vault.is_encrypted(data):
data = self._vault.decrypt(data)
@ -330,11 +331,11 @@ class DataLoader():
try:
with open(to_bytes(real_path), 'rb') as f:
data = f.read()
if self._vault.is_encrypted(data):
if self._vault.is_encrypted(f):
# if the file is encrypted and no password was specified,
# the decrypt call would throw an error, but we check first
# since the decrypt function doesn't know the file name
data = f.read()
if not self._vault_password:
raise AnsibleParserError("A vault password must be specified to decrypt %s" % file_path)
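
The DataLoader change above encodes the file name once and reuses the bytes for both the existence check and open(), which keeps non-ASCII paths working under Python 2. A reduced sketch; the utf-8 default here is an assumption:

import os

def to_bytes(obj, encoding='utf-8'):
    if isinstance(obj, bytes):
        return obj
    return obj.encode(encoding)

def read_file(file_name):
    b_file_name = to_bytes(file_name)
    if not os.path.exists(b_file_name) or not os.path.isfile(b_file_name):
        raise IOError("the file_name '%s' does not exist, or is not readable" % file_name)
    with open(b_file_name, 'rb') as f:
        return f.read()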


@ -30,6 +30,12 @@ from hashlib import sha256
from binascii import hexlify
from binascii import unhexlify
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# Note: Only used for loading obsolete VaultAES files. All files are written
# using the newer VaultAES256 which does not require md5
from hashlib import md5
@ -70,6 +76,10 @@ try:
HAS_PBKDF2HMAC = True
except ImportError:
pass
except Exception as e:
display.warning("Optional dependency 'cryptography' raised an exception, falling back to 'Crypto'")
import traceback
display.debug("Traceback from import of cryptography was {0}".format(traceback.format_exc()))
from ansible.compat.six import PY3
from ansible.utils.unicode import to_unicode, to_bytes
@ -105,6 +115,12 @@ class VaultLib:
:returns: True if it is recognized. Otherwise, False.
"""
if hasattr(data, 'read'):
current_position = data.tell()
header_part = data.read(len(b_HEADER))
data.seek(current_position)
return self.is_encrypted(header_part)
if to_bytes(data, errors='strict', encoding='utf-8').startswith(b_HEADER):
return True
return False
@ -445,7 +461,7 @@ class VaultEditor:
os.chown(dest, prev.st_uid, prev.st_gid)
def _editor_shell_command(self, filename):
EDITOR = os.environ.get('EDITOR','vim')
EDITOR = os.environ.get('EDITOR','vi')
editor = shlex.split(EDITOR)
editor.append(filename)
@ -471,7 +487,7 @@ class VaultFile(object):
# VaultFile a context manager instead (implement __enter__ and __exit__)
def __del__(self):
self.filehandle.close()
os.unlink(self.tmplfile)
os.unlink(self.tmpfile)
def is_encrypted(self):
peak = self.filehandle.readline()
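
The file-object branch added to is_encrypted() peeks at just enough bytes for the vault header and restores the stream position, so the caller's subsequent read is unaffected. Reduced to its core, with b_HEADER mirroring the vault header constant:

import io

b_HEADER = b'$ANSIBLE_VAULT'

def is_encrypted(data):
    if hasattr(data, 'read'):
        current_position = data.tell()
        header_part = data.read(len(b_HEADER))
        data.seek(current_position)
        return is_encrypted(header_part)
    if not isinstance(data, bytes):
        data = data.encode('utf-8')
    return data.startswith(b_HEADER)

stream = io.BytesIO(b'$ANSIBLE_VAULT;1.1;AES256\n6162636465...')
assert is_encrypted(stream)
assert stream.tell() == 0  # position restored for the real read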


@ -19,7 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from yaml.constructor import Constructor, ConstructorError
from yaml.constructor import SafeConstructor, ConstructorError
from yaml.nodes import MappingNode
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
from ansible.vars.unsafe_proxy import wrap_var
@ -31,7 +31,7 @@ except ImportError:
display = Display()
class AnsibleConstructor(Constructor):
class AnsibleConstructor(SafeConstructor):
def __init__(self, file_name=None):
self._ansible_file_name = file_name
super(AnsibleConstructor, self).__init__()
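
The swap from Constructor to SafeConstructor is the substance of this hardening: a loader built on the full constructor resolves python/* tags and can instantiate arbitrary objects, while the safe constructor rejects them. A quick demonstration against plain yaml.SafeLoader, which composes SafeConstructor:

import yaml

doc = '!!python/object/apply:os.system ["echo pwned"]'
try:
    yaml.load(doc, Loader=yaml.SafeLoader)
except yaml.constructor.ConstructorError as e:
    print('rejected unsafe tag: %s' % e.problem)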


@ -24,6 +24,7 @@ from ansible.compat.six import PY3
from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping
from ansible.vars.hostvars import HostVars
from ansible.vars.unsafe_proxy import AnsibleUnsafeText
class AnsibleDumper(yaml.SafeDumper):
'''
@ -45,6 +46,11 @@ AnsibleDumper.add_representer(
represent_unicode,
)
AnsibleDumper.add_representer(
AnsibleUnsafeText,
represent_unicode,
)
AnsibleDumper.add_representer(
HostVars,
represent_hostvars,


@ -61,6 +61,7 @@ class Playbook:
self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
# set the loaders basedir
cur_basedir = self._loader.get_basedir()
self._loader.set_basedir(self._basedir)
self._file_name = file_name
@ -74,6 +75,8 @@ class Playbook:
ds = self._loader.load_from_file(os.path.basename(file_name))
if not isinstance(ds, list):
# restore the basedir in case this error is caught and handled
self._loader.set_basedir(cur_basedir)
raise AnsibleParserError("playbooks must be a list of plays", obj=ds)
# Parse the playbook entries. For plays, we simply parse them
@ -81,6 +84,8 @@ class Playbook:
# PlaybookInclude() object
for entry in ds:
if not isinstance(entry, dict):
# restore the basedir in case this error is caught and handled
self._loader.set_basedir(cur_basedir)
raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)
if 'include' in entry:
@ -93,6 +98,9 @@ class Playbook:
entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader)
self._entries.append(entry_obj)
# we're done, so restore the old basedir in the loader
self._loader.set_basedir(cur_basedir)
def get_loader(self):
return self._loader


@ -31,7 +31,7 @@ from ansible.compat.six import iteritems, string_types
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleParserError
from ansible.errors import AnsibleParserError, AnsibleUndefinedVariable
from ansible.parsing.dataloader import DataLoader
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.utils.boolean import boolean
@ -86,6 +86,19 @@ class Base:
# and init vars, avoid using defaults in field declaration as it lives across plays
self.vars = dict()
def dump_me(self, depth=0):
if depth == 0:
print("DUMPING OBJECT ------------------------------------------------------")
print("%s- %s (%s, id=%s)" % (" " * depth, self.__class__.__name__, self, id(self)))
if hasattr(self, '_block') and self.__class__.__name__ == 'Task' and self._block:
self._block.dump_me(depth+2)
for attr_name in ('_parent_block', '_task_include'):
if hasattr(self, attr_name):
attr = getattr(self, attr_name)
if attr is not None:
attr.dump_me(depth+2)
if hasattr(self, '_play') and self._play:
self._play.dump_me(depth+2)
# The following three functions are used to programmatically define data
# descriptors (aka properties) for the Attributes of all of the playbook
@ -386,7 +399,7 @@ class Base:
except (TypeError, ValueError) as e:
raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s."
" Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds())
except UndefinedError as e:
except (AnsibleUndefinedVariable, UndefinedError) as e:
if templar._fail_on_undefined_errors and name != 'name':
raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined."
" The error was: %s" % (name,e), obj=self.get_ds())


@ -65,8 +65,6 @@ class Block(Base, Become, Conditional, Taggable):
all_vars = self.vars.copy()
if self._role:
all_vars.update(self._role.get_vars(self._dep_chain))
if self._parent_block:
all_vars.update(self._parent_block.get_vars())
if self._task_include:
@ -271,9 +269,6 @@ class Block(Base, Become, Conditional, Taggable):
if self._parent_block is not None:
if not self._parent_block.evaluate_conditional(templar, all_vars):
return False
elif self._role is not None:
if not self._role.evaluate_conditional(templar, all_vars):
return False
return super(Block, self).evaluate_conditional(templar, all_vars)
def set_loader(self, loader):
@ -388,3 +383,24 @@ class Block(Base, Become, Conditional, Taggable):
def has_tasks(self):
return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0
def get_include_params(self):
if self._parent:
return self._parent.get_include_params()
else:
return dict()
def all_parents_static(self):
'''
Determine if all of the parents of this block were statically loaded
or not. Since Task/TaskInclude objects may be in the chain, they simply
call their parent's all_parents_static() method. Only Block objects in
the chain check the statically_loaded value of the parent.
'''
from ansible.playbook.task_include import TaskInclude
if self._task_include and not self._task_include.statically_loaded:
return False
elif self._parent_block:
return self._parent_block.all_parents_static()
return True
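
The all_parents_static() walk added here (and mirrored on Task further below) bubbles up through alternating include/block parents; only include tasks carry a statically_loaded flag. A toy model of the traversal, with stand-in classes:

class ToyInclude(object):
    def __init__(self, statically_loaded):
        self.statically_loaded = statically_loaded

class ToyBlock(object):
    def __init__(self, task_include=None, parent_block=None):
        self._task_include = task_include
        self._parent_block = parent_block

    def all_parents_static(self):
        if self._task_include and not self._task_include.statically_loaded:
            return False
        elif self._parent_block:
            return self._parent_block.all_parents_static()
        return True

outer = ToyBlock(task_include=ToyInclude(statically_loaded=False))
inner = ToyBlock(parent_block=outer)
assert inner.all_parents_static() is False  # a dynamic include anywhere above wins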


@ -19,6 +19,10 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import re
from jinja2.compiler import generate
from jinja2.exceptions import UndefinedError
from ansible.compat.six import text_type
@ -26,6 +30,10 @@ from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
LOOKUP_REGEX = re.compile(r'lookup\s*\(')
VALID_VAR_REGEX = re.compile("^[_A-Za-z][_a-zA-Z0-9]*$")
DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')
class Conditional:
'''
@ -50,6 +58,18 @@ class Conditional:
if not isinstance(value, list):
setattr(self, name, [ value ])
def extract_defined_undefined(self, conditional):
results = []
cond = conditional
m = DEFINED_REGEX.search(cond)
while m:
results.append(m.groups())
cond = cond[m.end():]
m = DEFINED_REGEX.search(cond)
return results
def evaluate_conditional(self, templar, all_vars):
'''
Loops through the conditionals set on this object, returning
@ -83,21 +103,75 @@ class Conditional:
if conditional is None or conditional == '':
return True
if conditional in all_vars and '-' not in text_type(all_vars[conditional]):
# pull the "bare" var out, which allows for nested conditionals
# and things like:
# - assert:
# that:
# - item
# with_items:
# - 1 == 1
if conditional in all_vars and VALID_VAR_REGEX.match(conditional):
conditional = all_vars[conditional]
# make sure the templar is using the variables specified with this method
templar.set_available_variables(variables=all_vars)
try:
conditional = templar.template(conditional)
# if the conditional is "unsafe", disable lookups
disable_lookups = hasattr(conditional, '__UNSAFE__')
conditional = templar.template(conditional, disable_lookups=disable_lookups)
if not isinstance(conditional, text_type) or conditional == "":
return conditional
# a Jinja2 evaluation that results in something Python can eval!
# update the lookups flag, as the string returned above may now be unsafe
# and we don't want future templating calls to do unsafe things
disable_lookups |= hasattr(conditional, '__UNSAFE__')
# First, we do some low-level jinja2 parsing involving the AST format of the
# statement to ensure we don't do anything unsafe (using the disable_lookup flag above)
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False, inside_yield=False):
if isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Yield):
inside_yield = True
elif isinstance(node, ast.Str):
if disable_lookups:
if inside_call and node.s.startswith("__"):
# calling things with a dunder is generally bad at this point...
raise AnsibleError(
"Invalid access found in the conditional: '%s'" % conditional
)
elif inside_yield:
# we're inside a yield, so recursively parse and traverse the AST
# of the result to catch forbidden syntax from executing
parsed = ast.parse(node.s, mode='exec')
cnv = CleansingNodeVisitor()
cnv.visit(parsed)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(
child_node,
inside_call=inside_call,
inside_yield=inside_yield
)
try:
e = templar.environment.overlay()
e.filters.update(templar._get_filters())
e.tests.update(templar._get_tests())
res = e._parse(conditional, None, None)
res = generate(res, e, None, None)
parsed = ast.parse(res, mode='exec')
cnv = CleansingNodeVisitor()
cnv.visit(parsed)
except Exception as e:
raise AnsibleError("Invalid conditional detected: %s" % e)
# and finally we generate and template the presented string and look at the resulting string
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
conditional = templar.template(presented)
val = conditional.strip()
val = templar.template(presented, disable_lookups=disable_lookups).strip()
if val == "True":
return True
elif val == "False":
@ -105,14 +179,33 @@ class Conditional:
else:
raise AnsibleError("unable to evaluate conditional: %s" % original)
except (AnsibleUndefinedVariable, UndefinedError) as e:
# the templating failed, meaning most likely a
# variable was undefined. If we happened to be
# looking for an undefined variable, return True,
# otherwise fail
if "is undefined" in original:
return True
elif "is defined" in original:
return False
else:
raise AnsibleError("error while evaluating conditional (%s): %s" % (original, e))
# the templating failed, meaning most likely a variable was undefined. If we happened
# to be looking for an undefined variable, return True, otherwise fail
try:
# first we extract the variable name from the error message
var_name = re.compile(r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(str(e)).groups()[0]
# next we extract all defined/undefined tests from the conditional string
def_undef = self.extract_defined_undefined(conditional)
# then we loop through these, comparing the error variable name against
# each def/undef test we found above. If there is a match, we determine
# whether the logic/state mean the variable should exist or not and return
# the corresponding True/False
for (du_var, logic, state) in def_undef:
# when we compare the var names, normalize quotes because something
# like hostvars['foo'] may be tested against hostvars["foo"]
if var_name.replace("'", '"') == du_var.replace("'", '"'):
# the should_exist flag is an xor test between a negation in the logic
# portion and the state (defined or undefined)
should_exist = ('not' in logic) != (state == 'defined')
if should_exist:
return False
else:
return True
# as nothing above matched the failed var name, re-raise here to
# trigger the AnsibleUndefinedVariable exception again below
raise
except Exception as new_e:
raise AnsibleUndefinedVariable(
"error while evaluating conditional (%s): %s" % (original, e)
)
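
The undefined-variable recovery above can be exercised in isolation: pull every "var is [not] defined/undefined" test out of the conditional string, then xor the negation against the state to decide what a failed template implies. The regex is the one from the hunk; the conditional string is illustrative:

import re

DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')

def extract_defined_undefined(conditional):
    results = []
    cond = conditional
    m = DEFINED_REGEX.search(cond)
    while m:
        results.append(m.groups())
        cond = cond[m.end():]
        m = DEFINED_REGEX.search(cond)
    return results

for (du_var, logic, state) in extract_defined_undefined('foo is defined and bar is not defined'):
    should_exist = ('not' in logic) != (state == 'defined')
    print(du_var, should_exist)
# foo True  -> 'foo' was expected to exist, so an undefined 'foo' evaluates False
# bar False -> 'bar' was expected to be absent, so an undefined 'bar' evaluates True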


@ -0,0 +1,33 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
#from ansible.inventory.host import Host
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.handler import Handler
class HandlerTaskInclude(Handler, TaskInclude):
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
t = HandlerTaskInclude(block=block, role=role, task_include=task_include)
return t.load_data(data, variable_manager=variable_manager, loader=loader)


@ -38,7 +38,7 @@ def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=Non
return a list of Block() objects, where implicit blocks
are created for each bare Task.
'''
# we import here to prevent a circular dependency with imports
from ansible.playbook.block import Block
@ -81,6 +81,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.handler_task_include import HandlerTaskInclude
from ansible.template import Templar
assert isinstance(ds, list)
@ -95,7 +96,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
play=play,
parent_block=block,
role=role,
task_include=task_include,
task_include=None,
use_handlers=use_handlers,
variable_manager=variable_manager,
loader=loader,
@ -103,50 +104,56 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
task_list.append(t)
else:
if 'include' in task_ds:
t = TaskInclude.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
if use_handlers:
t = HandlerTaskInclude.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
else:
t = TaskInclude.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
all_vars = variable_manager.get_vars(loader=loader, play=play, task=t)
templar = Templar(loader=loader, variables=all_vars)
# check to see if this include is static, which can be true if:
# 1. the user set the 'static' option to true
# check to see if this include is dynamic or static:
# 1. the user has set the 'static' option to false or true
# 2. one of the appropriate config options was set
# 3. the included file name contains no variables, and has no loop
is_static = t.static or \
C.DEFAULT_TASK_INCLUDES_STATIC or \
(use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \
not templar._contains_vars(t.args.get('_raw_params')) and t.loop is None
if t.static is not None:
is_static = t.static
else:
is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \
(use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \
(not templar._contains_vars(t.args['_raw_params']) and t.all_parents_static() and not t.loop)
if is_static:
if t.loop is not None:
raise AnsibleParserError("You cannot use 'static' on an include with a loop", obj=task_ds)
# FIXME: all of this code is very similar (if not identical) to that in
# plugins/strategy/__init__.py, and should be unified to avoid
# patches only being applied to one or the other location
if task_include:
# handle relative includes by walking up the list of parent include
# tasks and checking the relative result to see if it exists
parent_include = task_include
cumulative_path = None
while parent_include is not None:
parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params')))
if cumulative_path is None:
cumulative_path = parent_include_dir
elif not os.path.isabs(cumulative_path):
cumulative_path = os.path.join(parent_include_dir, cumulative_path)
include_target = templar.template(t.args['_raw_params'])
if t._role:
new_basedir = os.path.join(t._role._role_path, 'tasks', cumulative_path)
include_file = loader.path_dwim_relative(new_basedir, 'tasks', include_target)
else:
include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
# we set a flag to indicate this include was static
t.statically_loaded = True
if os.path.exists(include_file):
break
else:
parent_include = parent_include._task_include
else:
# handle relative includes by walking up the list of parent include
# tasks and checking the relative result to see if it exists
parent_include = task_include
cumulative_path = None
found = False
while parent_include is not None:
parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params')))
if cumulative_path is None:
cumulative_path = parent_include_dir
elif not os.path.isabs(cumulative_path):
cumulative_path = os.path.join(parent_include_dir, cumulative_path)
include_target = templar.template(t.args['_raw_params'])
if t._role:
new_basedir = os.path.join(t._role._role_path, 'tasks', cumulative_path)
include_file = loader.path_dwim_relative(new_basedir, 'tasks', include_target)
else:
include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
if os.path.exists(include_file):
found = True
break
else:
parent_include = parent_include._task_include
if not found:
try:
include_target = templar.template(t.args['_raw_params'])
except AnsibleUndefinedVariable as e:
@ -171,7 +178,13 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
if data is None:
return []
elif not isinstance(data, list):
raise AnsibleError("included task files must contain a list of tasks", obj=data)
raise AnsibleParserError("included task files must contain a list of tasks", obj=data)
# since we can't send callbacks here, we display a message directly in
# the same fashion used by the on_include callback. We also do it here,
# because the recursive nature of helper methods means we may be loading
# nested includes, and we want the include order printed correctly
display.vv("statically included: %s" % include_file)
except AnsibleFileNotFound as e:
if t.static or \
C.DEFAULT_TASK_INCLUDES_STATIC or \
@ -223,7 +236,6 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
b.tags = list(set(b.tags).union(tags))
# END FIXME
# FIXME: send callback here somehow...
# FIXME: handlers shouldn't need this special handling, but do
# right now because they don't iterate blocks correctly
if use_handlers:
@ -233,11 +245,11 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
task_list.extend(included_blocks)
else:
task_list.append(t)
elif use_handlers:
t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
task_list.append(t)
else:
t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
if use_handlers:
t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
else:
t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
task_list.append(t)
return task_list


@ -136,28 +136,6 @@ class Play(Base, Taggable, Become):
return super(Play, self).preprocess_data(ds)
def _load_hosts(self, attr, ds):
'''
Loads the hosts from the given datastructure, which might be a list
or a simple string. We also switch integers in this list back to strings,
as the YAML parser will turn things that look like numbers into numbers.
'''
if isinstance(ds, (string_types, int)):
ds = [ ds ]
if not isinstance(ds, list):
raise AnsibleParserError("'hosts' must be specified as a list or a single pattern", obj=ds)
# YAML parsing of things that look like numbers may have
# resulted in integers showing up in the list, so convert
# them back to strings to prevent problems
for idx,item in enumerate(ds):
if isinstance(item, int):
ds[idx] = "%s" % item
return ds
def _load_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
@ -265,7 +243,7 @@ class Play(Base, Taggable, Become):
if len(self.roles) > 0:
for r in self.roles:
block_list.extend(r.get_handler_blocks())
block_list.extend(r.get_handler_blocks(play=self))
return block_list


@ -21,7 +21,9 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pipes
import pwd
import random
import re
import string
@ -29,11 +31,9 @@ import string
from ansible.compat.six import iteritems, string_types
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.template import Templar
from ansible.utils.boolean import boolean
from ansible.utils.unicode import to_unicode
__all__ = ['PlayContext']
@ -356,7 +356,7 @@ class PlayContext(Base):
# and likewise for the remote user
for user_var in MAGIC_VARIABLE_MAPPING.get('remote_user'):
if user_var in delegated_vars:
if user_var in delegated_vars and delegated_vars[user_var]:
break
else:
delegated_vars['ansible_user'] = task.remote_user or self.remote_user
@ -411,6 +411,12 @@ class PlayContext(Base):
if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
new_info.port = int(C.DEFAULT_REMOTE_PORT)
# if the final connection type is local, reset the remote_user value
# to that of the currently logged in user, to ensure any become settings
# are obeyed correctly
if new_info.connection == 'local':
new_info.remote_user = pwd.getpwuid(os.getuid()).pw_name
# special overrides for the connection setting
if len(delegated_vars) > 0:
# in the event that we were using local before make sure to reset the
@ -447,16 +453,21 @@ class PlayContext(Base):
success_key = None
self.prompt = None
if executable is None:
executable = self.executable
if self.become:
if not executable:
executable = self.executable
becomecmd = None
randbits = ''.join(random.choice(string.ascii_lowercase) for x in range(32))
success_key = 'BECOME-SUCCESS-%s' % randbits
success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd))
if executable:
command = '%s -c %s' % (executable, success_cmd)
else:
command = success_cmd
# set executable to use for the privilege escalation method, with various overrides
exe = self.become_exe or \
getattr(self, '%s_exe' % self.become_method, None) or \
@ -485,9 +496,9 @@ class PlayContext(Base):
# force quick error if password is required but not supplied, should prevent sudo hangs.
if self.become_pass:
prompt = '[sudo via ansible, key=%s] password: ' % randbits
becomecmd = '%s %s -p "%s" -u %s %s -c %s' % (exe, flags.replace('-n',''), prompt, self.become_user, executable, success_cmd)
becomecmd = '%s %s -p "%s" -u %s %s' % (exe, flags.replace('-n',''), prompt, self.become_user, command)
else:
becomecmd = '%s %s -u %s %s -c %s' % (exe, flags, self.become_user, executable, success_cmd)
becomecmd = '%s %s -u %s %s' % (exe, flags, self.become_user, command)
elif self.become_method == 'su':
@ -498,7 +509,7 @@ class PlayContext(Base):
return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data))
prompt = detect_su_prompt
becomecmd = '%s %s %s -c %s' % (exe, flags, self.become_user, pipes.quote('%s -c %s' % (executable, success_cmd)))
becomecmd = '%s %s %s -c %s' % (exe, flags, self.become_user, pipes.quote(command))
elif self.become_method == 'pbrun':
@ -534,7 +545,7 @@ class PlayContext(Base):
exe = self.become_exe or 'dzdo'
becomecmd = '%s -u %s %s -c %s' % (exe, self.become_user, executable, success_cmd)
becomecmd = '%s -u %s %s' % (exe, self.become_user, command)
else:
raise AnsibleError("Privilege escalation method not found: %s" % self.become_method)
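
The restructuring above quotes the success-marker echo and the real command once, and only wraps them in a shell when an executable is set; each become method then embeds the prepared command. A sketch with illustrative values:

import pipes  # shlex.quote is the Python 3 equivalent

def build_command(success_key, cmd, executable=None):
    success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd))
    if executable:
        return '%s -c %s' % (executable, success_cmd)
    return success_cmd

print(build_command('BECOME-SUCCESS-abc123', 'id -un', executable='/bin/sh'))
# -> /bin/sh -c 'echo BECOME-SUCCESS-abc123; id -un'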


@ -96,7 +96,7 @@ class PlaybookInclude(Base, Conditional, Taggable):
# plays. If so, we can take a shortcut here and simply prepend them to
# those attached to each block (if any)
if forward_conditional:
for task_block in entry.tasks:
for task_block in entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks:
task_block.when = self.when[:] + task_block.when
return pb


@ -118,16 +118,6 @@ class Role(Base, Become, Conditional, Taggable):
if role_include.role not in play.ROLE_CACHE:
play.ROLE_CACHE[role_include.role] = dict()
if parent_role:
if parent_role.when:
new_when = parent_role.when[:]
new_when.extend(r.when or [])
r.when = new_when
if parent_role.tags:
new_tags = parent_role.tags[:]
new_tags.extend(r.tags or [])
r.tags = new_tags
play.ROLE_CACHE[role_include.role][hashed_params] = r
return r
@ -311,12 +301,24 @@ class Role(Base, Become, Conditional, Taggable):
def get_task_blocks(self):
return self._task_blocks[:]
def get_handler_blocks(self):
def get_handler_blocks(self, play, dep_chain=None):
block_list = []
# update the dependency chain here
if dep_chain is None:
dep_chain = []
new_dep_chain = dep_chain + [self]
for dep in self.get_direct_dependencies():
dep_blocks = dep.get_handler_blocks()
dep_blocks = dep.get_handler_blocks(play=play, dep_chain=new_dep_chain)
block_list.extend(dep_blocks)
block_list.extend(self._handler_blocks)
for task_block in self._handler_blocks:
new_task_block = task_block.copy()
new_task_block._dep_chain = new_dep_chain
new_task_block._play = play
block_list.append(new_task_block)
return block_list
def has_run(self, host):


@ -138,18 +138,22 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
# we always start the search for roles in the base directory of the playbook
role_search_paths = [
os.path.join(self._loader.get_basedir(), u'roles'),
self._loader.get_basedir(),
]
# also search in the configured roles path
if C.DEFAULT_ROLES_PATH:
role_search_paths.extend(C.DEFAULT_ROLES_PATH)
# finally, append the roles basedir, if it was set, so we can
# next, append the roles basedir, if it was set, so we can
# search relative to that directory for dependent roles
if self._role_basedir:
role_search_paths.append(self._role_basedir)
# finally as a last resort we look in the current basedir as set
# in the loader (which should be the playbook dir itself) but without
# the roles/ dir appended
role_search_paths.append(self._loader.get_basedir())
# create a templar class to template the dependency names, in
# case they contain variables
if self._variable_manager is not None:


@ -84,7 +84,7 @@ class Task(Base, Conditional, Taggable, Become):
_notify = FieldAttribute(isa='list')
_poll = FieldAttribute(isa='int')
_register = FieldAttribute(isa='string')
_retries = FieldAttribute(isa='int', default=3)
_retries = FieldAttribute(isa='int')
_until = FieldAttribute(isa='list', default=[])
def __init__(self, block=None, role=None, task_include=None):
@ -105,7 +105,7 @@ class Task(Base, Conditional, Taggable, Become):
def get_name(self):
''' return the name of the task '''
if self._role and self.name:
if self._role and self.name and ("%s : " % self._role._role_name) not in self.name:
return "%s : %s" % (self._role.get_name(), self.name)
elif self.name:
return self.name
@ -196,7 +196,7 @@ class Task(Base, Conditional, Taggable, Become):
if 'vars' in ds:
# _load_vars is defined in Base, and is used to load a dictionary
# or list of dictionaries in a standard way
new_ds['vars'] = self._load_vars(None, ds.pop('vars'))
new_ds['vars'] = self._load_vars(None, ds.get('vars'))
else:
new_ds['vars'] = dict()
@ -245,13 +245,6 @@ class Task(Base, Conditional, Taggable, Become):
super(Task, self).post_validate(templar)
def _post_validate_register(self, attr, value, templar):
'''
Override post validation for the register args field, which is not
supposed to be templated
'''
return value
def _post_validate_loop_args(self, attr, value, templar):
'''
Override post validation for the loop args field, which is templated
@ -454,3 +447,42 @@ class Task(Base, Conditional, Taggable, Become):
'''
return self._get_parent_attribute('any_errors_fatal')
def _get_attr_loop(self):
return self._attributes['loop']
def _get_attr_loop_control(self):
return self._attributes['loop_control']
def get_dep_chain(self):
if self._parent:
return self._parent.get_dep_chain()
else:
return None
def get_search_path(self):
'''
Return the list of paths you should search for files, in order.
This follows role/playbook dependency chain.
'''
path_stack = []
dep_chain = self.get_dep_chain()
# inside role: add the dependency chain from current to dependant
if dep_chain:
path_stack.extend(reversed([x._role_path for x in dep_chain]))
# add path of task itself, unless it is already in the list
task_dir = os.path.dirname(self.get_path())
if task_dir not in path_stack:
path_stack.append(task_dir)
return path_stack
def all_parents_static(self):
if self._task_include and not self._task_include.statically_loaded:
return False
elif self._block:
return self._block.all_parents_static()
return True
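
The search-path ordering established by get_search_path() is: dependency-chain role paths with the innermost (current) role first, then the directory of the task file itself. A reduced sketch with illustrative paths:

import os

def build_search_path(dep_chain_paths, task_path):
    path_stack = []
    if dep_chain_paths:
        path_stack.extend(reversed(dep_chain_paths))
    task_dir = os.path.dirname(task_path)
    if task_dir not in path_stack:
        path_stack.append(task_dir)
    return path_stack

print(build_search_path(['/srv/roles/common', '/srv/roles/web'],
                        '/srv/roles/web/tasks/main.yml'))
# -> ['/srv/roles/web', '/srv/roles/common', '/srv/roles/web/tasks']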


@ -41,13 +41,22 @@ class TaskInclude(Task):
# =================================================================================
# ATTRIBUTES
_static = FieldAttribute(isa='bool', default=False)
_static = FieldAttribute(isa='bool', default=None)
def __init__(self, block=None, role=None, task_include=None):
super(TaskInclude, self).__init__(block=block, role=role, task_include=task_include)
self.statically_loaded = False
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
t = TaskInclude(block=block, role=role, task_include=task_include)
return t.load_data(data, variable_manager=variable_manager, loader=loader)
def copy(self, exclude_block=False):
new_me = super(TaskInclude, self).copy(exclude_block=exclude_block)
new_me.statically_loaded = self.statically_loaded
return new_me
def get_vars(self):
'''
We override the parent Task() classes get_vars here because


@ -145,15 +145,15 @@ class PluginLoader:
def _get_package_paths(self):
''' Gets the path of a Python package '''
paths = []
if not self.package:
return []
if not hasattr(self, 'package_path'):
m = __import__(self.package)
parts = self.package.split('.')[1:]
self.package_path = os.path.join(os.path.dirname(m.__file__), *parts)
paths.extend(self._all_directories(self.package_path))
return paths
for parent_mod in parts:
m = getattr(m, parent_mod)
self.package_path = os.path.dirname(m.__file__)
return self._all_directories(self.package_path)
def _get_paths(self):
''' Return a list of paths to search for plugins in '''
@ -353,6 +353,7 @@ class PluginLoader:
def all(self, *args, **kwargs):
''' instantiates all plugins with the same arguments '''
path_only = kwargs.pop('path_only', False)
class_only = kwargs.pop('class_only', False)
all_matches = []
@ -364,6 +365,10 @@ class PluginLoader:
if '__init__' in name:
continue
if path_only:
yield path
continue
if path not in self._module_cache:
self._module_cache[path] = self._load_module_source(name, path)


@ -30,13 +30,16 @@ import tempfile
import time
from abc import ABCMeta, abstractmethod
from ansible.compat.six import binary_type, text_type, iteritems, with_metaclass
from ansible import constants as C
from ansible.compat.six import binary_type, string_types, text_type, iteritems, with_metaclass
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.executor.module_common import modify_module
from ansible.playbook.play_context import MAGIC_VARIABLE_MAPPING
from ansible.release import __version__
from ansible.parsing.utils.jsonify import jsonify
from ansible.utils.unicode import to_bytes, to_unicode
from ansible.vars.unsafe_proxy import wrap_var
try:
from __main__ import display
@@ -240,7 +243,8 @@ class ActionBase(with_metaclass(ABCMeta, object)):
raise AnsibleConnectionFailure(output)
try:
rc = self._connection._shell.join_path(result['stdout'].strip(), u'').splitlines()[-1]
stdout_parts = result['stdout'].strip().split('%s=' % basefile, 1)
rc = self._connection._shell.join_path(stdout_parts[-1], u'').splitlines()[-1]
except IndexError:
# stdout was empty or just space, set to / to trigger error in next if
rc = '/'
@@ -291,7 +295,29 @@ class ActionBase(with_metaclass(ABCMeta, object)):
return remote_path
def _fixup_perms(self, remote_path, remote_user, execute=False, recursive=True):
def _fixup_perms(self, remote_path, remote_user, execute=True, recursive=True):
"""
We need the files we upload to be readable (and sometimes executable)
by the user being sudo'd to but we want to limit other people's access
(because the files could contain passwords or other private
information.
Deprecated in favor of _fixup_perms2. Ansible code has been updated to
use _fixup_perms2. This code is maintained to provide partial support
for custom actions (non-recursive mode only).
"""
display.deprecated('_fixup_perms is deprecated. Use _fixup_perms2 instead.', version='2.4', removed=False)
if recursive:
raise AnsibleError('_fixup_perms with recursive=True (the default) is no longer supported. ' +
'Use _fixup_perms2 if support for previous releases is not required. '
'Otherwise use fixup_perms with recursive=False.')
return self._fixup_perms2([remote_path], remote_user, execute)
def _fixup_perms2(self, remote_paths, remote_user, execute=True):
"""
We need the files we upload to be readable (and sometimes executable)
by the user being sudo'd to but we want to limit other people's access
@@ -299,17 +325,17 @@ class ActionBase(with_metaclass(ABCMeta, object)):
information. We achieve this in one of these ways:
* If no sudo is performed or the remote_user is sudo'ing to
themselves, we don't have to change permisions.
themselves, we don't have to change permissions.
* If the remote_user sudo's to a privileged user (for instance, root),
we don't have to change permissions
* If the remote_user is a privileged user and sudo's to an
unprivileged user then we change the owner of the file to the
unprivileged user so they can read it.
* If the remote_user is an unprivileged user and we're sudo'ing to
a second unprivileged user then we attempt to grant the second
unprivileged user access via file system acls.
* If granting file system acls fails we can set the file to be world
readable so that the second unprivileged user can read the file.
* If the remote_user sudo's to an unprivileged user then we attempt to
grant the unprivileged user access via file system acls.
* If granting file system acls fails we try to change the owner of the
file with chown which only works in case the remote_user is
privileged or the remote systems allows chown calls by unprivileged
users (e.g. HP-UX)
* If the chown fails we can set the file to be world readable so that
the second unprivileged user can read the file.
Since this could allow other users to get access to private
information we only do this if ansible is configured with
"allow_world_readable_tmpfiles" in the ansible.cfg
@@ -317,51 +343,39 @@ class ActionBase(with_metaclass(ABCMeta, object)):
if self._connection._shell.SHELL_FAMILY == 'powershell':
# This won't work on Powershell as-is, so we'll just completely skip until
# we have a need for it, at which point we'll have to do something different.
return remote_path
if remote_path is None:
# Sometimes code calls us naively -- it has a var which could
# contain a path to a tmp dir but doesn't know if it needs to
# exist or not. If there's no path, then there's no need for us
# to do work
self._display.debug('_fixup_perms called with remote_path==None. Sure this is correct?')
return remote_path
return remote_paths
if self._play_context.become and self._play_context.become_user not in ('root', remote_user):
# Unprivileged user that's different than the ssh user. Let's get
# to work!
# Try chown'ing the file. This will only work if our SSH user has
# root privileges, but since we can't reliably determine that from
# the username (think "toor" on FreeBSD), let's just try first and
# apologize later:
res = self._remote_chown(remote_path, self._play_context.become_user, recursive=recursive)
if res['rc'] == 0:
# root can read things that don't have read bit but can't
# execute them without the execute bit, so we might need to
# set that even if we're root. We just ran chown successfully,
# so apparently we are root.
# Try to use file system acls to make the files readable for sudo'd
# user
if execute:
mode = 'rx'
else:
mode = 'rX'
res = self._remote_set_user_facl(remote_paths, self._play_context.become_user, mode)
if res['rc'] != 0:
# File system acls failed; let's try to use chown next
# Set executable bit first as on some systems an
# unprivileged user can use chown
if execute:
res = self._remote_chmod('u+x', remote_path, recursive=recursive)
res = self._remote_chmod(remote_paths, 'u+x')
if res['rc'] != 0:
raise AnsibleError('Failed to set file mode on remote temporary files (rc: {0}, err: {1})'.format(res['rc'], res['stderr']))
elif remote_user == 'root':
raise AnsibleError('Failed to change ownership of the temporary files Ansible needs to create despite connecting as root. Unprivileged become user would be unable to read the file.')
else:
# Chown'ing failed. We're probably lacking root privileges; let's try something else.
if execute:
mode = 'rx'
else:
mode = 'rX'
# Try to use fs acls to solve this problem
res = self._remote_set_user_facl(remote_path, self._play_context.become_user, mode, recursive=recursive, sudoable=False)
if res['rc'] != 0:
res = self._remote_chown(remote_paths, self._play_context.become_user)
if res['rc'] != 0 and remote_user == 'root':
# chown failed even though remote_user is root
raise AnsibleError('Failed to change ownership of the temporary files Ansible needs to create despite connecting as root. Unprivileged become user would be unable to read the file.')
elif res['rc'] != 0:
if C.ALLOW_WORLD_READABLE_TMPFILES:
# fs acls failed -- do things this insecure way only
# if the user opted in in the config file
self._display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user which may be insecure. For information on securing this, see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user')
res = self._remote_chmod('a+%s' % mode, remote_path, recursive=recursive)
# chown and fs acls failed -- do things this insecure
# way only if the user opted in in the config file
display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user which may be insecure. For information on securing this, see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user')
res = self._remote_chmod(remote_paths, 'a+%s' % mode)
if res['rc'] != 0:
raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], res['stderr']))
else:
@@ -370,33 +384,33 @@ class ActionBase(with_metaclass(ABCMeta, object)):
# Can't depend on the file being transferred with execute
# permissions. Only need user perms because no become was
# used here
res = self._remote_chmod('u+x', remote_path, recursive=recursive)
res = self._remote_chmod(remote_paths, 'u+x')
if res['rc'] != 0:
raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], res['stderr']))
return remote_path
return remote_paths
def _remote_chmod(self, mode, path, recursive=True, sudoable=False):
def _remote_chmod(self, paths, mode, sudoable=False):
'''
Issue a remote chmod command
'''
cmd = self._connection._shell.chmod(mode, path, recursive=recursive)
cmd = self._connection._shell.chmod(paths, mode)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_chown(self, path, user, group=None, recursive=True, sudoable=False):
def _remote_chown(self, paths, user, sudoable=False):
'''
Issue a remote chown command
'''
cmd = self._connection._shell.chown(path, user, group, recursive=recursive)
cmd = self._connection._shell.chown(paths, user)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_set_user_facl(self, path, user, mode, recursive=True, sudoable=False):
def _remote_set_user_facl(self, paths, user, mode, sudoable=False):
'''
Issue a remote call to setfacl
'''
cmd = self._connection._shell.set_user_facl(path, user, mode, recursive=recursive)
cmd = self._connection._shell.set_user_facl(paths, user, mode)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
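For orientation, these helpers now take a list of paths so one remote round trip covers every temp file. The command shapes below are assumptions about what a POSIX sh shell plugin might build, not its verbatim output:

    from shlex import quote  # pipes.quote in the Python 2 era of this code

    paths = ['/home/user/.ansible/tmp/x', '/home/user/.ansible/tmp/y']
    quoted = ' '.join(quote(p) for p in paths)
    print('chmod u+x %s' % quoted)
    print('chown becomeuser %s' % quoted)
    print('setfacl -m u:becomeuser:rx %s' % quoted)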
@@ -423,10 +437,12 @@ class ActionBase(with_metaclass(ABCMeta, object)):
# happens sometimes when it is a dir and not on bsd
if not 'checksum' in mystat['stat']:
mystat['stat']['checksum'] = ''
elif not isinstance(mystat['stat']['checksum'], string_types):
raise AnsibleError("Invalid checksum returned by stat: expected a string type but got %s" % type(mystat['stat']['checksum']))
return mystat['stat']
def _remote_checksum(self, path, all_vars):
def _remote_checksum(self, path, all_vars, follow=False):
'''
Produces a remote checksum given a path,
Returns a number 0-4 for specific errors instead of checksum, also ensures it is different
@@ -438,7 +454,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
'''
x = "0" # unknown error has occured
try:
remote_stat = self._execute_remote_stat(path, all_vars, follow=False)
remote_stat = self._execute_remote_stat(path, all_vars, follow=follow)
if remote_stat['exists'] and remote_stat['isdir']:
x = "3" # its a directory not a file
else:
@@ -480,21 +496,49 @@ class ActionBase(with_metaclass(ABCMeta, object)):
else:
return initial_fragment
def _filter_leading_non_json_lines(self, data):
@staticmethod
def _filter_non_json_lines(data):
'''
Used to avoid random output from SSH at the top of JSON output, like messages from
tcgetattr, or where dropbear spews MOTD on every single command (which is nuts).
need to filter anything which starts not with '{', '[', ', '=' or is an empty line.
filter only leading lines since multiline JSON is valid.
need to filter anything which does not start with '{', '[', or is an empty line.
Have to be careful how we filter trailing junk as multiline JSON is valid.
'''
idx = 0
for line in data.splitlines(True):
if line.startswith((u'{', u'[')):
# Filter initial junk
lines = data.splitlines()
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
endchar = u'}'
break
idx = idx + len(line)
elif line.startswith(u'['):
endchar = u']'
break
else:
display.debug('No start of json char found')
raise ValueError('No start of json char found')
return data[idx:]
# Filter trailing junk
lines = lines[start:]
lines.reverse()
for end, line in enumerate(lines):
if line.strip().endswith(endchar):
break
else:
display.debug('No end of json char found')
raise ValueError('No end of json char found')
if end < len(lines) - 1:
# Trailing junk is uncommon and can point to things the user might
# want to change. So print a warning if we find any
trailing_junk = lines[:end]
trailing_junk.reverse()
display.warning('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
lines = lines[end:]
lines.reverse()
return '\n'.join(lines)
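A self-contained rerun of the same trimming idea on made-up SSH noise; this condensed version scans backwards instead of using the double reverse, so it is a sketch of the technique rather than the method verbatim:

    import json

    def filter_non_json_lines(data):
        lines = data.splitlines()
        for start, line in enumerate(lines):
            stripped = line.strip()
            if stripped.startswith('{') or stripped.startswith('['):
                endchar = '}' if stripped.startswith('{') else ']'
                break
        else:
            raise ValueError('No start of json char found')
        lines = lines[start:]
        for end in range(len(lines) - 1, -1, -1):  # scan backwards for the end
            if lines[end].strip().endswith(endchar):
                break
        else:
            raise ValueError('No end of json char found')
        return '\n'.join(lines[:end + 1])

    noisy = 'Welcome to dropbear\n{"changed": false, "msg": "ok"}\nstty: junk'
    print(json.loads(filter_non_json_lines(noisy)))  # {'changed': False, 'msg': 'ok'}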
def _strip_success_message(self, data):
'''
@@ -539,7 +583,16 @@ class ActionBase(with_metaclass(ABCMeta, object)):
module_args['_ansible_diff'] = self._play_context.diff
# let module know our verbosity
module_args['_ansible_verbosity'] = self._display.verbosity
module_args['_ansible_verbosity'] = display.verbosity
# give the module information about the ansible version
module_args['_ansible_version'] = __version__
# set the syslog facility to be used in the module
module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)
# let module know about filesystems that selinux treats specially
module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
(module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
if not shebang:
@@ -566,7 +619,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
# the remote system, which can be read and parsed by the module
args_data = ""
for k,v in iteritems(module_args):
args_data += '%s="%s" ' % (k, pipes.quote(text_type(v)))
args_data += '%s=%s ' % (k, pipes.quote(text_type(v)))
self._transfer_data(args_file_path, args_data)
elif module_style == 'non_native_want_json':
self._transfer_data(args_file_path, json.dumps(module_args))
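The one-character change to the old-style args file above matters for quoting: pipes.quote already produces a shell-safe token, so wrapping it in extra double quotes corrupts values containing quotes or spaces. A quick illustration, using shlex.quote as the modern name for pipes.quote:

    from shlex import quote

    k, v = 'msg', 'hello "world"'
    print('%s="%s" ' % (k, quote(v)))  # old: msg="'hello "world"'"  -- broken
    print('%s=%s ' % (k, quote(v)))    # new: msg='hello "world"'    -- correct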
@@ -574,9 +627,17 @@ class ActionBase(with_metaclass(ABCMeta, object)):
environment_string = self._compute_environment_string()
remote_files = None
if args_file_path:
remote_files = tmp, remote_module_path, args_file_path
elif remote_module_path:
remote_files = tmp, remote_module_path
# Fix permissions of the tmp path and tmp files. This should be
# called after all files have been transferred.
self._fixup_perms(tmp, remote_user, recursive=True)
if remote_files:
self._fixup_perms2(remote_files, remote_user)
cmd = ""
in_data = None
@@ -625,12 +686,52 @@ class ActionBase(with_metaclass(ABCMeta, object)):
display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
return data
def _clean_returned_data(self, data):
remove_keys = set()
fact_keys = set(data.keys())
# first we add all of our magic variable names to the set of
# keys we want to remove from facts
for magic_var in MAGIC_VARIABLE_MAPPING:
remove_keys.update(fact_keys.intersection(MAGIC_VARIABLE_MAPPING[magic_var]))
# next we remove any connection plugin specific vars
for conn_path in self._shared_loader_obj.connection_loader.all(path_only=True):
try:
conn_name = os.path.splitext(os.path.basename(conn_path))[0]
re_key = re.compile('^ansible_%s_' % conn_name)
for fact_key in fact_keys:
if re_key.match(fact_key):
remove_keys.add(fact_key)
except AttributeError:
pass
# remove some KNOWN keys
for hard in ['ansible_rsync_path', 'ansible_playbook_python']:
if hard in fact_keys:
remove_keys.add(hard)
# finally, we search for interpreter keys to remove
re_interp = re.compile('^ansible_.*_interpreter$')
for fact_key in fact_keys:
if re_interp.match(fact_key):
remove_keys.add(fact_key)
# then we remove them (except for ssh host keys)
for r_key in remove_keys:
if not r_key.startswith('ansible_ssh_host_key_'):
del data[r_key]
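An illustration of which keys the rules above keep or drop, with invented fact values; as in the code, the ssh host key exemption is applied last:

    import re

    facts = {
        'ansible_ssh_host_key_rsa_public': 'AAAA...',     # kept: ssh host key
        'ansible_python_interpreter': '/usr/bin/python',  # dropped: *_interpreter
        'ansible_rsync_path': '/usr/bin/rsync',           # dropped: known key
        'ansible_distribution': 'Debian',                 # kept: ordinary fact
    }
    re_interp = re.compile(r'^ansible_.*_interpreter$')
    for key in list(facts):
        if key.startswith('ansible_ssh_host_key_'):
            continue
        if re_interp.match(key) or key in ('ansible_rsync_path',
                                           'ansible_playbook_python'):
            del facts[key]
    print(sorted(facts))
    # ['ansible_distribution', 'ansible_ssh_host_key_rsa_public']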
def _parse_returned_data(self, res):
try:
data = json.loads(self._filter_leading_non_json_lines(res.get('stdout', u'')))
data = json.loads(self._filter_non_json_lines(res.get('stdout', u'')))
data['_ansible_parsed'] = True
if 'ansible_facts' in data and isinstance(data['ansible_facts'], dict):
self._clean_returned_data(data['ansible_facts'])
data['ansible_facts'] = wrap_var(data['ansible_facts'])
if 'add_host' in data and isinstance(data['add_host'].get('host_vars', None), dict):
self._clean_returned_data(data['add_host']['host_vars'])
data['add_host'] = wrap_var(data['add_host'])
except ValueError:
# not valid json, lets try to capture error
data = dict(failed=True, parsed=False)
data = dict(failed=True, _ansible_parsed=False)
data['msg'] = "MODULE FAILURE"
data['module_stdout'] = res.get('stdout', u'')
if 'stderr' in res:
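The wrap_var calls above taint returned facts and add_host data so the templar will not evaluate template syntax smuggled inside them. A minimal stand-in for the idea; the real AnsibleUnsafe types and recursion are not reproduced here:

    class UnsafeText(str):
        """Stand-in for the unsafe marker type; the templar skips such values."""

    def wrap_var_sketch(value):
        # Recurse through containers and mark every string as unsafe.
        if isinstance(value, dict):
            return dict((k, wrap_var_sketch(v)) for k, v in value.items())
        if isinstance(value, list):
            return [wrap_var_sketch(v) for v in value]
        if isinstance(value, str):
            return UnsafeText(value)
        return value

    facts = wrap_var_sketch({'motd': '{{ 40 + 2 }}'})
    assert isinstance(facts['motd'], UnsafeText)  # must never be re-templated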
@@ -669,11 +770,23 @@ class ActionBase(with_metaclass(ABCMeta, object)):
if self._connection.allow_executable:
if executable is None:
executable = self._play_context.executable
# mitigation for SSH race which can drop stdout (https://github.com/ansible/ansible/issues/13876)
# only applied for the default executable to avoid interfering with the raw action
cmd = self._connection._shell.append_command(cmd, 'sleep 0')
if executable:
cmd = executable + ' -c ' + pipes.quote(cmd)
display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
# Change directory to basedir of task for command execution when connection is local
if self._connection.transport == 'local':
cwd = os.getcwd()
os.chdir(self._loader.get_basedir())
try:
rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
finally:
if self._connection.transport == 'local':
os.chdir(cwd)
# stdout and stderr may be either a file-like or a bytes object.
# Convert either one to a text type


@@ -153,7 +153,7 @@ class ActionModule(ActionBase):
xfered = self._transfer_file(path, remote_path)
# fix file permissions when the copy is done as a different user
self._fixup_perms(tmp, remote_user, recursive=True)
self._fixup_perms2((tmp, remote_path), remote_user)
new_module_args.update( dict( src=xfered,))


@@ -70,17 +70,13 @@ class ActionModule(ActionBase):
args_data += '%s="%s" ' % (k, pipes.quote(to_unicode(v)))
argsfile = self._transfer_data(self._connection._shell.join_path(tmp, 'arguments'), args_data)
self._fixup_perms(tmp, remote_user, execute=True, recursive=True)
# Only the following two files need to be executable but we'd have to
# make three remote calls if we wanted to just set them executable.
# There's not really a problem with marking too many of the temp files
# executable so we go ahead and mark them all as executable in the
# line above (the line above is needed in any case [although
# execute=False is okay if we uncomment the lines below] so that all
# the files are readable in case the remote_user and become_user are
# different and both unprivileged)
#self._fixup_perms(remote_module_path, remote_user, execute=True, recursive=False)
#self._fixup_perms(async_module_path, remote_user, execute=True, recursive=False)
remote_paths = tmp, remote_module_path, async_module_path
# argsfile doesn't need to be executable, but this saves an extra call to the remote host
if argsfile:
remote_paths += argsfile,
self._fixup_perms2(remote_paths, remote_user, execute=True)
async_limit = self._task.async
async_jid = str(random.randint(0, 999999999999))
@@ -96,4 +92,11 @@ class ActionModule(ActionBase):
result['changed'] = True
if 'skipped' in result and result['skipped'] or 'failed' in result and result['failed']:
return result
# the async_wrapper module returns dumped JSON via its stdout
# response, so we parse it here and replace the result
result = self._parse_returned_data(result)
return result


@@ -217,8 +217,10 @@ class ActionModule(ActionBase):
# Define a remote directory that we will copy the file to.
tmp_src = self._connection._shell.join_path(tmp, 'source')
remote_path = None
if not raw:
self._transfer_file(source_full, tmp_src)
remote_path = self._transfer_file(source_full, tmp_src)
else:
self._transfer_file(source_full, dest_file)
@@ -227,7 +229,8 @@ class ActionModule(ActionBase):
self._loader.cleanup_tmp_file(source_full)
# fix file permissions when the copy is done as a different user
self._fixup_perms(tmp, remote_user, recursive=True)
if remote_path:
self._fixup_perms2((tmp, remote_path), remote_user)
if raw:
# Continue to next iteration if raw is defined.
@@ -245,6 +248,8 @@ class ActionModule(ActionBase):
original_basename=source_rel,
)
)
if 'content' in new_module_args:
del new_module_args['content']
module_return = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=delete_remote_tmp)
module_executed = True


@@ -54,7 +54,11 @@ class ActionModule(ActionBase):
try:
results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True, bare_deprecated=False)
if results == self._task.args['var']:
raise AnsibleUndefinedVariable
# if results is not str/unicode type, raise an exception
if type(results) not in [str, unicode]:
raise AnsibleUndefinedVariable
# If var name is same as result, try to template it
results = self._templar.template("{{" + results + "}}", convert_bare=True, fail_on_undefined=True)
except AnsibleUndefinedVariable:
results = "VARIABLE IS NOT DEFINED!"


@@ -64,7 +64,8 @@ class ActionModule(ActionBase):
remote_checksum = None
if not self._play_context.become:
# calculate checksum for the remote file, don't bother if using become as slurp will be used
remote_checksum = self._remote_checksum(source, all_vars=task_vars)
# Force remote_checksum to follow symlinks because fetch always follows symlinks
remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True)
# use slurp if permissions are lacking or privilege escalation is needed
remote_data = None


@@ -34,6 +34,8 @@ class ActionModule(ActionBase):
result = super(ActionModule, self).run(tmp, task_vars)
source = self._task.args.get('_raw_params')
if source is None:
raise AnsibleError("No filename was specified to include.", self._task._ds)
if self._task._role:
source = self._loader.path_dwim_relative(self._task._role._role_path, 'vars', source)


@@ -75,6 +75,9 @@ class ActionModule(ActionBase):
def _handle_template(self):
src = self._task.args.get('src')
if not src:
return
working_path = self._get_working_path()
if os.path.isabs(src) or urlparse.urlsplit(src).scheme:
@@ -93,6 +96,17 @@ class ActionModule(ActionBase):
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
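Restating the search order the comment above documents, as a standalone list build with invented paths:

    import os

    working_path = '/home/user/playbooks'
    role_path = '/home/user/playbooks/roles/netcfg'
    dep_role_paths = ['/home/user/playbooks/roles/common']  # from the dep chain
    source = '/home/user/playbooks/templates/router.j2'

    searchpath = [working_path, role_path]
    searchpath.extend(dep_role_paths)
    searchpath.append(os.path.dirname(source))
    print(searchpath)
    # ['/home/user/playbooks', '/home/user/playbooks/roles/netcfg',
    #  '/home/user/playbooks/roles/common', '/home/user/playbooks/templates']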


@@ -59,7 +59,7 @@ class ActionModule(ActionBase):
tmp_src = self._connection._shell.join_path(tmp, os.path.basename(src))
self._transfer_file(src, tmp_src)
self._fixup_perms(tmp, remote_user, recursive=True)
self._fixup_perms2((tmp, tmp_src), remote_user)
new_module_args = self._task.args.copy()
new_module_args.update(


@@ -123,8 +123,9 @@ class ActionModule(ActionBase):
fd = None
try:
fd = self._connection._new_stdin.fileno()
except ValueError:
# someone is using a closed file descriptor as stdin
except (ValueError, AttributeError):
# ValueError: someone is using a closed file descriptor as stdin
# AttributeError: someone is using a null file descriptor as stdin on windoez
pass
if fd is not None:
if isatty(fd):


@@ -37,7 +37,7 @@ class ActionModule(ActionBase):
result['skipped'] = True
return result
executable = self._task.args.get('executable')
executable = self._task.args.get('executable', False)
result.update(self._low_level_execute_command(self._task.args.get('_raw_params'), executable=executable))
return result


@@ -19,7 +19,6 @@ __metaclass__ = type
import os
from ansible import constants as C
from ansible.plugins.action import ActionBase
@@ -79,9 +78,8 @@ class ActionModule(ActionBase):
tmp_src = self._connection._shell.join_path(tmp, os.path.basename(source))
self._transfer_file(source, tmp_src)
sudoable = True
# set file permissions, more permissive when the copy is done as a different user
self._fixup_perms(tmp, remote_user, execute=True, recursive=True)
self._fixup_perms2((tmp, tmp_src), remote_user, execute=True)
# add preparation steps to one ssh roundtrip executing the script
env_string = self._compute_environment_string()

Some files were not shown because too many files have changed in this diff.