Merge pull request #10885 from bcoca/galaxy_v2
revamped base CLI and moved galaxy to its own classes/api
commit fec9b8a5ad
17 changed files with 1690 additions and 493 deletions
@@ -27,6 +27,7 @@ New Modules:
* cloudstack: cs_securitygroup_rule
* cloudstack: cs_vmsnapshot
* maven_artifact
* openstack: os_server_facts
* pushover
* zabbix_host
* zabbix_hostmacro
|
@@ -40,13 +40,15 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False,
|
|||
''' return a configuration variable with casting '''
|
||||
value = _get_config(p, section, key, env_var, default)
|
||||
if boolean:
|
||||
return mk_boolean(value)
|
||||
if value and integer:
|
||||
return int(value)
|
||||
if value and floating:
|
||||
return float(value)
|
||||
if value and islist:
|
||||
return [x.strip() for x in value.split(',')]
|
||||
value = mk_boolean(value)
|
||||
if value:
|
||||
if integer:
|
||||
value = int(value)
|
||||
elif floating:
|
||||
value = float(value)
|
||||
elif islist:
|
||||
if isinstance(value, basestring):
|
||||
value = [x.strip() for x in value.split(',')]
|
||||
return value
|
||||
|
||||
def _get_config(p, section, key, env_var, default):
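The rewritten get_config above funnels everything through a single return path: booleans are converted first, and the integer/float/list casts only apply to truthy values. A standalone sketch of that casting order, with cast_config_value standing in for the real get_config/mk_boolean pair (the helper and its boolean test are illustrative, not Ansible code):

def cast_config_value(value, boolean=False, integer=False, floating=False, islist=False):
    # Mirror the order used in the hunk above: boolean conversion happens
    # unconditionally, the remaining casts only fire for truthy values.
    if boolean:
        value = str(value).lower() in ('1', 'true', 'yes', 'on')
    if value:
        if integer:
            value = int(value)
        elif floating:
            value = float(value)
        elif islist and isinstance(value, str):
            value = [x.strip() for x in value.split(',')]
    return value

print(cast_config_value('git, hg', islist=True))   # ['git', 'hg']
print(cast_config_value('0', boolean=True))        # False
print(cast_config_value('10', integer=True))       # 10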
|
||||
|
@@ -104,7 +106,7 @@ DEFAULTS='defaults'
|
|||
|
||||
# configurable things
|
||||
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
|
||||
DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', get_config(p, DEFAULTS,'hostfile','ANSIBLE_HOSTS', '/etc/ansible/hosts')))
|
||||
DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', get_config(p, DEFAULTS,'inventory','ANSIBLE_INVENTORY', '/etc/ansible/hosts')))
|
||||
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None)
|
||||
DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles'))
|
||||
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
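The DEFAULT_HOST_LIST lines above nest one get_config call inside another, so the inner key only supplies the fallback default when the outer key is unset in both the environment and the config file. A standalone sketch of that fallback pattern (lookup is an illustrative stand-in for _get_config, which consults the environment variable before the config file):

import os

def lookup(settings, key, env_var, default):
    # Environment variable beats the config file, which beats the default.
    return os.environ.get(env_var) or settings.get(key) or default

settings = {'hostfile': '/opt/hosts'}
# 'hostfile'/ANSIBLE_HOSTS is consulted first; 'inventory'/ANSIBLE_INVENTORY
# is only reached through the nested default.
host_list = lookup(settings, 'hostfile', 'ANSIBLE_HOSTS',
                   lookup(settings, 'inventory', 'ANSIBLE_INVENTORY', '/etc/ansible/hosts'))
print(host_list)   # /opt/hosts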
|
||||
|
@@ -203,10 +205,16 @@ ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_fi
|
|||
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
|
||||
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
|
||||
|
||||
# galaxy related
|
||||
DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com')
|
||||
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
|
||||
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', ['git','hg'], islist=True)
|
||||
|
||||
# characters included in auto-generated passwords
|
||||
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
|
||||
|
||||
# non-configurable things
|
||||
MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
|
||||
DEFAULT_BECOME_PASS = None
|
||||
DEFAULT_SUDO_PASS = None
|
||||
DEFAULT_REMOTE_PASS = None
|
||||
|
|
|
@@ -140,6 +140,10 @@ class AnsibleError(Exception):
|
|||
|
||||
return error_message
|
||||
|
||||
class AnsibleOptionsError(AnsibleError):
|
||||
''' bad or incomplete options passed '''
|
||||
pass
|
||||
|
||||
class AnsibleParserError(AnsibleError):
|
||||
''' something was detected early that is wrong about a playbook or data file '''
|
||||
pass
|
||||
|
@@ -164,6 +168,14 @@ class AnsibleFilterError(AnsibleRuntimeError):
|
|||
''' a templating failure '''
|
||||
pass
|
||||
|
||||
class AnsibleLookupError(AnsibleRuntimeError):
|
||||
''' a lookup failure '''
|
||||
pass
|
||||
|
||||
class AnsibleCallbackError(AnsibleRuntimeError):
|
||||
''' a callback failure '''
|
||||
pass
|
||||
|
||||
class AnsibleUndefinedVariable(AnsibleRuntimeError):
|
||||
''' a templating failure '''
|
||||
pass
|
||||
|
@@ -171,7 +183,3 @@ class AnsibleUndefinedVariable(AnsibleRuntimeError):
|
|||
class AnsibleFileNotFound(AnsibleRuntimeError):
|
||||
''' a file missing failure '''
|
||||
pass
|
||||
|
||||
class AnsibleParserError(AnsibleRuntimeError):
|
||||
''' a parser error '''
|
||||
pass
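The new AnsibleOptionsError exists so command-line front ends can tell "bad flags" apart from other failures, which is how the updated v2/bin/ansible entry point later in this diff maps exceptions to exit codes. A minimal sketch of that mapping (run_cli is an illustrative wrapper; the exception classes are the ones declared in this hunk):

from ansible.errors import AnsibleError, AnsibleOptionsError   # assumes the v2 tree on PYTHONPATH

def run_cli(cli):
    # Options problems print usage and exit 1; any other Ansible error exits 2,
    # mirroring the __main__ block of v2/bin/ansible further down.
    try:
        cli.parse()
        return cli.run()
    except AnsibleOptionsError as e:
        cli.parser.print_help()
        print("[ERROR]: %s" % e)
        return 1
    except AnsibleError as e:
        print("[ERROR]: %s" % e)
        return 2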
|
||||
|
|
70  v2/ansible/galaxy/__init__.py  Normal file
|
@@ -0,0 +1,70 @@
|
|||
########################################################################
|
||||
#
|
||||
# (C) 2015, Brian Coca <bcoca@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
########################################################################
|
||||
''' This manages remote shared Ansible objects, mainly roles'''
|
||||
|
||||
import os
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.utils.display import Display
|
||||
|
||||
# default_readme_template
|
||||
# default_meta_template
|
||||
|
||||
|
||||
class Galaxy(object):
|
||||
''' Keeps global galaxy info '''
|
||||
|
||||
def __init__(self, options, display=None):
|
||||
|
||||
if display is None:
|
||||
self.display = Display()
|
||||
else:
|
||||
self.display = display
|
||||
|
||||
self.options = options
|
||||
self.roles_path = getattr(self.options, 'roles_path', None)
|
||||
if self.roles_path:
|
||||
self.roles_path = os.path.expanduser(self.roles_path)
|
||||
|
||||
self.roles = {}
|
||||
|
||||
# load data path for resource usage
|
||||
this_dir, this_filename = os.path.split(__file__)
|
||||
self.DATA_PATH = os.path.join(this_dir, "data")
|
||||
|
||||
#TODO: move to getter for lazy loading
|
||||
self.default_readme = self._str_from_data_file('readme')
|
||||
self.default_meta = self._str_from_data_file('metadata_template.j2')
|
||||
|
||||
def add_role(self, role):
|
||||
self.roles[role.name] = role
|
||||
|
||||
def remove_role(self, role_name):
|
||||
del self.roles[role_name]
|
||||
|
||||
|
||||
def _str_from_data_file(self, filename):
|
||||
myfile = os.path.join(self.DATA_PATH, filename)
|
||||
try:
|
||||
return open(myfile).read()
|
||||
except Exception as e:
|
||||
raise AnsibleError("Could not open %s: %s" % (filename, str(e)))
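Galaxy only needs an options object (anything carrying an optional roles_path attribute) plus, optionally, a Display. A small construction sketch, assuming the v2 tree from this PR is importable; argparse.Namespace stands in for the optparse values the real CLI passes:

from argparse import Namespace
from ansible.galaxy import Galaxy

options = Namespace(roles_path='~/.ansible/roles')   # stand-in for parsed CLI options
galaxy = Galaxy(options)                              # builds a default Display() when none is given

print(galaxy.roles_path)          # user-expanded at construction time
print(galaxy.default_meta[:60])   # start of data/metadata_template.j2, loaded eagerly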
|
||||
|
141  v2/ansible/galaxy/api.py  Executable file
|
@@ -0,0 +1,141 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
########################################################################
|
||||
#
|
||||
# (C) 2013, James Cammarata <jcammarata@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
########################################################################
|
||||
import json
|
||||
from urllib2 import urlopen, quote as urlquote
|
||||
from urlparse import urlparse
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
|
||||
class GalaxyAPI(object):
|
||||
''' This class is meant to be used as an API client for an Ansible Galaxy server '''
|
||||
|
||||
SUPPORTED_VERSIONS = ['v1']
|
||||
|
||||
def __init__(self, galaxy, api_server):
|
||||
|
||||
self.galaxy = galaxy
|
||||
|
||||
try:
|
||||
urlparse(api_server, scheme='https')
|
||||
except:
|
||||
raise AnsibleError("Invalid server API url passed: %s" % api_server)
|
||||
|
||||
server_version = self.get_server_api_version('%s/api/' % (api_server))
|
||||
if not server_version:
|
||||
raise AnsibleError("Could not retrieve server API version: %s" % api_server)
|
||||
|
||||
if server_version in self.SUPPORTED_VERSIONS:
|
||||
self.baseurl = '%s/api/%s' % (api_server, server_version)
|
||||
self.version = server_version # for future use
|
||||
self.galaxy.display.vvvvv("Base API: %s" % self.baseurl)
|
||||
else:
|
||||
raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)
|
||||
|
||||
def get_server_api_version(self, api_server):
|
||||
"""
|
||||
Fetches the Galaxy API current version to ensure
|
||||
the API server is up and reachable.
|
||||
"""
|
||||
#TODO: fix galaxy server which returns current_version path (/api/v1) vs actual version (v1)
|
||||
# also should set baseurl using supported_versions which has path
|
||||
return 'v1'
|
||||
|
||||
try:
|
||||
data = json.load(urlopen(api_server))
|
||||
return data.get("current_version", 'v1')
|
||||
except Exception as e:
|
||||
# TODO: report error
|
||||
return None
|
||||
|
||||
def lookup_role_by_name(self, role_name, notify=True):
|
||||
"""
|
||||
Find a role by name
|
||||
"""
|
||||
role_name = urlquote(role_name)
|
||||
|
||||
try:
|
||||
parts = role_name.split(".")
|
||||
user_name = ".".join(parts[0:-1])
|
||||
role_name = parts[-1]
|
||||
if notify:
|
||||
self.galaxy.display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
|
||||
except:
|
||||
raise AnsibleError("- invalid role name (%s). Specify role as format: username.rolename" % role_name)
|
||||
|
||||
url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name)
|
||||
self.galaxy.display.vvvv("- %s" % (url))
|
||||
try:
|
||||
data = json.load(urlopen(url))
|
||||
if len(data["results"]) != 0:
|
||||
return data["results"][0]
|
||||
except:
|
||||
# TODO: report on connection/availability errors
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
def fetch_role_related(self, related, role_id):
|
||||
"""
|
||||
Fetch the list of related items for the given role.
|
||||
The url comes from the 'related' field of the role.
|
||||
"""
|
||||
|
||||
try:
|
||||
url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related)
|
||||
data = json.load(urlopen(url))
|
||||
results = data['results']
|
||||
done = (data.get('next', None) == None)
|
||||
while not done:
|
||||
url = '%s%s' % (self.baseurl, data['next'])
|
||||
self.galaxy.display.display(url)
|
||||
data = json.load(urlopen(url))
|
||||
results += data['results']
|
||||
done = (data.get('next', None) == None)
|
||||
return results
|
||||
except:
|
||||
return None
|
||||
|
||||
def get_list(self, what):
|
||||
"""
|
||||
Fetch the list of items specified.
|
||||
"""
|
||||
|
||||
try:
|
||||
url = '%s/%s/?page_size' % (self.baseurl, what)
|
||||
data = json.load(urlopen(url))
|
||||
if "results" in data:
|
||||
results = data['results']
|
||||
else:
|
||||
results = data
|
||||
done = True
|
||||
if "next" in data:
|
||||
done = (data.get('next', None) == None)
|
||||
while not done:
|
||||
url = '%s%s' % (self.baseurl, data['next'])
|
||||
self.galaxy.display.display(url)
|
||||
data = json.load(urlopen(url))
|
||||
results += data['results']
|
||||
done = (data.get('next', None) == None)
|
||||
return results
|
||||
except Exception as error:
|
||||
raise AnsibleError("Failed to download the %s list: %s" % (what, str(error)))
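Typical client flow: construct the API against a server, look a role up as username.rolename, then page through its related items. A hedged sketch; the role name is only an example, and the 'id' and 'name' result fields are assumptions about the Galaxy v1 JSON this client consumes:

from argparse import Namespace
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI

galaxy = Galaxy(Namespace(roles_path=None))
api = GalaxyAPI(galaxy, 'https://galaxy.ansible.com')

role = api.lookup_role_by_name('geerlingguy.apache', notify=False)
if role:
    # Related items (e.g. 'versions') are fetched 50 per page by fetch_role_related.
    versions = api.fetch_role_related('versions', role['id']) or []
    print([v.get('name') for v in versions])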
|
45  v2/ansible/galaxy/data/metadata_template.j2  Normal file
|
@@ -0,0 +1,45 @@
|
|||
galaxy_info:
|
||||
author: {{ author }}
|
||||
description: {{description}}
|
||||
company: {{ company }}
|
||||
# If the issue tracker for your role is not on github, uncomment the
|
||||
# next line and provide a value
|
||||
# issue_tracker_url: {{ issue_tracker_url }}
|
||||
# Some suggested licenses:
|
||||
# - BSD (default)
|
||||
# - MIT
|
||||
# - GPLv2
|
||||
# - GPLv3
|
||||
# - Apache
|
||||
# - CC-BY
|
||||
license: {{ license }}
|
||||
min_ansible_version: {{ min_ansible_version }}
|
||||
#
|
||||
# Below are all platforms currently available. Just uncomment
|
||||
# the ones that apply to your role. If you don't see your
|
||||
# platform on this list, let us know and we'll get it added!
|
||||
#
|
||||
#platforms:
|
||||
{%- for platform,versions in platforms.iteritems() %}
|
||||
#- name: {{ platform }}
|
||||
# versions:
|
||||
# - all
|
||||
{%- for version in versions %}
|
||||
# - {{ version }}
|
||||
{%- endfor %}
|
||||
{%- endfor %}
|
||||
#
|
||||
# Below are all categories currently available. Just as with
|
||||
# the platforms above, uncomment those that apply to your role.
|
||||
#
|
||||
#categories:
|
||||
{%- for category in categories %}
|
||||
#- {{ category.name }}
|
||||
{%- endfor %}
|
||||
dependencies: []
|
||||
# List your role dependencies here, one per line.
|
||||
# Be sure to remove the '[]' above if you add dependencies
|
||||
# to this list.
|
||||
{% for dependency in dependencies %}
|
||||
#- {{ dependency }}
|
||||
{% endfor %}
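The file is plain Jinja2, so it can be rendered outside of ansible-galaxy too; note the iteritems() call ties it to Python 2. A sketch with illustrative placeholder values (the loader path assumes the v2 tree layout introduced in this PR):

# Python 2 + Jinja2, to match the iteritems() call in the template above.
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader('v2/ansible/galaxy/data'))
template = env.get_template('metadata_template.j2')

print(template.render(
    author='your name',
    description='your description',
    company='your company (optional)',
    issue_tracker_url='http://example.com/issue/tracker',
    license='license (GPLv2, CC-BY, etc)',
    min_ansible_version='1.2',
    platforms={'EL': ['5', '6'], 'Ubuntu': ['trusty']},
    categories=[],
    dependencies=[],
))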
|
38  v2/ansible/galaxy/data/readme  Normal file
|
@@ -0,0 +1,38 @@
|
|||
Role Name
|
||||
=========
|
||||
|
||||
A brief description of the role goes here.
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
|
||||
|
||||
Role Variables
|
||||
--------------
|
||||
|
||||
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
|
||||
|
||||
Dependencies
|
||||
------------
|
||||
|
||||
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
|
||||
|
||||
Example Playbook
|
||||
----------------
|
||||
|
||||
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
|
||||
|
||||
- hosts: servers
|
||||
roles:
|
||||
- { role: username.rolename, x: 42 }
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
BSD
|
||||
|
||||
Author Information
|
||||
------------------
|
||||
|
||||
An optional section for the role authors to include contact information, or a website (HTML is not allowed).
|
295  v2/ansible/galaxy/role.py  Normal file
|
@@ -0,0 +1,295 @@
|
|||
########################################################################
|
||||
#
|
||||
# (C) 2015, Brian Coca <bcoca@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
########################################################################
|
||||
|
||||
import datetime
|
||||
import os
|
||||
import subprocess
|
||||
import tarfile
|
||||
import tempfile
|
||||
import yaml
|
||||
from shutil import rmtree
|
||||
from urllib2 import urlopen
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleError
|
||||
|
||||
class GalaxyRole(object):
|
||||
|
||||
SUPPORTED_SCMS = set(['git', 'hg'])
|
||||
META_MAIN = os.path.join('meta', 'main.yml')
|
||||
META_INSTALL = os.path.join('meta', '.galaxy_install_info')
|
||||
ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars')
|
||||
|
||||
|
||||
def __init__(self, galaxy, name, src=None, version=None, scm=None):
|
||||
|
||||
self._metadata = None
|
||||
self._install_info = None
|
||||
|
||||
self.options = galaxy.options
|
||||
self.display = galaxy.display
|
||||
|
||||
self.name = name
|
||||
self.version = version
|
||||
self.src = src
|
||||
self.scm = scm
|
||||
|
||||
self.path = (os.path.join(galaxy.roles_path, self.name))
|
||||
|
||||
def fetch_from_scm_archive(self):
|
||||
|
||||
# this can be configured to prevent unwanted SCMS but cannot add new ones unless the code is also updated
|
||||
if scm not in self.scms:
|
||||
self.display.display("The %s scm is not currently supported" % scm)
|
||||
return False
|
||||
|
||||
tempdir = tempfile.mkdtemp()
|
||||
clone_cmd = [scm, 'clone', role_url, self.name]
|
||||
with open('/dev/null', 'w') as devnull:
|
||||
try:
|
||||
self.display.display("- executing: %s" % " ".join(clone_cmd))
|
||||
popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
|
||||
except:
|
||||
raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
|
||||
rc = popen.wait()
|
||||
if rc != 0:
|
||||
self.display.display("- command %s failed" % ' '.join(clone_cmd))
|
||||
self.display.display(" in directory %s" % tempdir)
|
||||
return False
|
||||
|
||||
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
|
||||
if scm == 'hg':
|
||||
archive_cmd = ['hg', 'archive', '--prefix', "%s/" % self.name]
|
||||
if role_version:
|
||||
archive_cmd.extend(['-r', role_version])
|
||||
archive_cmd.append(temp_file.name)
|
||||
if scm == 'git':
|
||||
archive_cmd = ['git', 'archive', '--prefix=%s/' % self.name, '--output=%s' % temp_file.name]
|
||||
if role_version:
|
||||
archive_cmd.append(role_version)
|
||||
else:
|
||||
archive_cmd.append('HEAD')
|
||||
|
||||
with open('/dev/null', 'w') as devnull:
|
||||
self.display.display("- executing: %s" % " ".join(archive_cmd))
|
||||
popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, self.name),
|
||||
stderr=devnull, stdout=devnull)
|
||||
rc = popen.wait()
|
||||
if rc != 0:
|
||||
self.display.display("- command %s failed" % ' '.join(archive_cmd))
|
||||
self.display.display(" in directory %s" % tempdir)
|
||||
return False
|
||||
|
||||
rmtree(tempdir, ignore_errors=True)
|
||||
|
||||
return temp_file.name
|
||||
|
||||
@property
|
||||
def metadata(self):
|
||||
"""
|
||||
Returns role metadata
|
||||
"""
|
||||
if self._metadata is None:
|
||||
meta_path = os.path.join(self.path, self.META_MAIN)
|
||||
if os.path.isfile(meta_path):
|
||||
try:
|
||||
f = open(meta_path, 'r')
|
||||
self._metadata = yaml.safe_load(f)
|
||||
except:
|
||||
self.display.vvvvv("Unable to load metadata for %s" % self.name)
|
||||
return False
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
return self._metadata
|
||||
|
||||
|
||||
@property
|
||||
def install_info(self):
|
||||
"""
|
||||
Returns role install info
|
||||
"""
|
||||
if self._install_info is None:
|
||||
|
||||
info_path = os.path.join(self.path, self.META_INSTALL)
|
||||
if os.path.isfile(info_path):
|
||||
try:
|
||||
f = open(info_path, 'r')
|
||||
self._install_info = yaml.safe_load(f)
|
||||
except:
|
||||
self.display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
|
||||
return False
|
||||
finally:
|
||||
f.close()
|
||||
return self._install_info
|
||||
|
||||
def _write_galaxy_install_info(self):
|
||||
"""
|
||||
Writes a YAML-formatted file to the role's meta/ directory
|
||||
(named .galaxy_install_info) which contains some information
|
||||
we can use later for commands like 'list' and 'info'.
|
||||
"""
|
||||
|
||||
info = dict(
|
||||
version=self.version,
|
||||
install_date=datetime.datetime.utcnow().strftime("%c"),
|
||||
)
|
||||
info_path = os.path.join(self.path, self.META_INSTALL)
|
||||
try:
|
||||
f = open(info_path, 'w+')
|
||||
self._install_info = yaml.safe_dump(info, f)
|
||||
except:
|
||||
return False
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
return True
|
||||
|
||||
def remove(self):
|
||||
"""
|
||||
Removes the specified role from the roles path. There is a
|
||||
sanity check to make sure there's a meta/main.yml file at this
|
||||
path so the user doesn't blow away random directories
|
||||
"""
|
||||
if self.metadata:
|
||||
try:
|
||||
rmtree(self.path)
|
||||
return True
|
||||
except:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
def fetch(self, target, role_data):
|
||||
"""
|
||||
Downloads the archived role from github to a temp location, extracts
|
||||
it, and then copies the extracted role to the role library path.
|
||||
"""
|
||||
|
||||
# first grab the file and save it to a temp location
|
||||
if self.src:
|
||||
archive_url = self.src
|
||||
else:
|
||||
archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target)
|
||||
self.display.display("- downloading role from %s" % archive_url)
|
||||
|
||||
try:
|
||||
url_file = urlopen(archive_url)
|
||||
temp_file = tempfile.NamedTemporaryFile(delete=False)
|
||||
data = url_file.read()
|
||||
while data:
|
||||
temp_file.write(data)
|
||||
data = url_file.read()
|
||||
temp_file.close()
|
||||
return temp_file.name
|
||||
except:
|
||||
# TODO: better urllib2 error handling for error
|
||||
# messages that are more exact
|
||||
self.display.error("failed to download the file.")
|
||||
return False
|
||||
|
||||
def install(self, role_filename):
|
||||
# the file is a tar, so open it that way and extract it
|
||||
# to the specified (or default) roles directory
|
||||
|
||||
if not tarfile.is_tarfile(role_filename):
|
||||
self.display.error("the file downloaded was not a tar.gz")
|
||||
return False
|
||||
else:
|
||||
if role_filename.endswith('.gz'):
|
||||
role_tar_file = tarfile.open(role_filename, "r:gz")
|
||||
else:
|
||||
role_tar_file = tarfile.open(role_filename, "r")
|
||||
# verify the role's meta file
|
||||
meta_file = None
|
||||
members = role_tar_file.getmembers()
|
||||
# next find the metadata file
|
||||
for member in members:
|
||||
if self.META_MAIN in member.name:
|
||||
meta_file = member
|
||||
break
|
||||
if not meta_file:
|
||||
self.display.error("this role does not appear to have a meta/main.yml file.")
|
||||
return False
|
||||
else:
|
||||
try:
|
||||
self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
|
||||
except:
|
||||
self.display.error("this role does not appear to have a valid meta/main.yml file.")
|
||||
return False
|
||||
|
||||
# we strip off the top-level directory for all of the files contained within
|
||||
# the tar file here, since the default is 'github_repo-target', and change it
|
||||
# to the specified role's name
|
||||
self.display.display("- extracting %s to %s" % (self.name, self.path))
|
||||
try:
|
||||
if os.path.exists(self.path):
|
||||
if not os.path.isdir(self.path):
|
||||
self.display.error("the specified roles path exists and is not a directory.")
|
||||
return False
|
||||
elif not getattr(self.options, "force", False):
|
||||
self.display.error("the specified role %s appears to already exist. Use --force to replace it." % self.name)
|
||||
return False
|
||||
else:
|
||||
# using --force, remove the old path
|
||||
if not self.remove():
|
||||
self.display.error("%s doesn't appear to contain a role." % self.path)
|
||||
self.display.error(" please remove this directory manually if you really want to put the role here.")
|
||||
return False
|
||||
else:
|
||||
os.makedirs(self.path)
|
||||
|
||||
# now we do the actual extraction to the path
|
||||
for member in members:
|
||||
# we only extract files, and remove any relative path
|
||||
# bits that might be in the file for security purposes
|
||||
# and drop the leading directory, as mentioned above
|
||||
if member.isreg() or member.issym():
|
||||
parts = member.name.split(os.sep)[1:]
|
||||
final_parts = []
|
||||
for part in parts:
|
||||
if part != '..' and '~' not in part and '$' not in part:
|
||||
final_parts.append(part)
|
||||
member.name = os.path.join(*final_parts)
|
||||
role_tar_file.extract(member, self.path)
|
||||
|
||||
# write out the install info file for later use
|
||||
self._write_galaxy_install_info()
|
||||
except OSError as e:
|
||||
self.display.error("Could not update files in %s: %s" % (self.path, str(e)))
|
||||
return False
|
||||
|
||||
# return the parsed yaml metadata
|
||||
self.display.display("- %s was installed successfully" % self.name)
|
||||
return True
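The extraction loop above strips the archive's leading 'repo-version/' directory and drops any path component containing '..', '~' or '$' before writing files under the role path. A standalone sketch of that sanitization (sanitize_member_name is an illustrative helper, not part of the diff):

import os

def sanitize_member_name(name):
    # Drop the leading 'repo-version/' directory, then filter components that
    # look like traversal or expansion attempts, as install() does above.
    parts = name.split(os.sep)[1:]
    safe = [p for p in parts if p != '..' and '~' not in p and '$' not in p]
    return os.path.join(*safe) if safe else ''

print(sanitize_member_name('myrepo-v1.0/tasks/main.yml'))     # tasks/main.yml
print(sanitize_member_name('myrepo-v1.0/../../etc/passwd'))   # etc/passwd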
|
||||
|
||||
@property
|
||||
def spec(self):
|
||||
"""
|
||||
Returns role spec info
|
||||
{
|
||||
'scm': 'git',
|
||||
'src': 'http://git.example.com/repos/repo.git',
|
||||
'version': 'v1.0',
|
||||
'name': 'repo'
|
||||
}
|
||||
"""
|
||||
return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
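Putting the pieces together, a role object is constructed, fetched to a temporary tarball, then installed under roles_path. A hedged end-to-end sketch; the role name, version and role_data fields are placeholders, and role_data normally comes from GalaxyAPI.lookup_role_by_name():

from argparse import Namespace
from ansible.galaxy import Galaxy
from ansible.galaxy.role import GalaxyRole

galaxy = Galaxy(Namespace(roles_path='~/.ansible/roles', force=False))
role = GalaxyRole(galaxy, 'geerlingguy.apache', version='1.0.0')

# fetch() only reads github_user/github_repo from role_data; placeholder values here.
role_data = {'github_user': 'geerlingguy', 'github_repo': 'ansible-role-apache'}

archive = role.fetch('1.0.0', role_data)      # temp tarball path, or False on error
if archive and role.install(archive):
    print(role.spec)                          # scm/src/version/name summary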
|
|
@@ -203,7 +203,7 @@ class VaultEditor(object):
|
|||
_, tmp_path = tempfile.mkstemp()
|
||||
|
||||
if existing_data:
|
||||
self.write_data(data, tmp_path)
|
||||
self.write_data(existing_data, tmp_path)
|
||||
|
||||
# drop the user into an editor on the tmp file
|
||||
call(self._editor_shell_command(tmp_path))
|
||||
|
|
|
@@ -37,6 +37,8 @@ class CallbackModule(CallbackBase):
|
|||
pass
|
||||
|
||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
||||
if 'exception' in result._result and self._display.verbosity < 3:
|
||||
del result._result['exception']
|
||||
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), json.dumps(result._result, ensure_ascii=False)), color='red')
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
|
|
|
@@ -38,6 +38,8 @@ class CallbackModule(CallbackBase):
|
|||
pass
|
||||
|
||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
||||
if 'exception' in result._result and self._display.verbosity < 3:
|
||||
del result._result['exception']
|
||||
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), result._result), color='red')
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
|
|
|
@@ -28,6 +28,7 @@ import getpass
|
|||
|
||||
from ansible import __version__
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.utils.unicode import to_bytes
|
||||
|
||||
# FIXME: documentation for methods here, which have mostly been
|
||||
|
@@ -40,141 +41,286 @@ class SortedOptParser(optparse.OptionParser):
|
|||
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
|
||||
return optparse.OptionParser.format_help(self, formatter=None)
|
||||
|
||||
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False,
|
||||
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
|
||||
''' create an options parser for any ansible script '''
|
||||
#TODO: move many cli only functions in this file into the CLI class
|
||||
class CLI(object):
|
||||
''' code behind bin/ansible* programs '''
|
||||
|
||||
parser = SortedOptParser(usage, version=version("%prog"))
|
||||
VALID_ACTIONS = ['No Actions']
|
||||
|
||||
parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
|
||||
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
|
||||
parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count",
|
||||
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
|
||||
parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
|
||||
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
|
||||
parser.add_option('-i', '--inventory-file', dest='inventory',
|
||||
help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST,
|
||||
default=C.DEFAULT_HOST_LIST)
|
||||
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
|
||||
help='ask for connection password')
|
||||
parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
|
||||
help='use this file to authenticate the connection')
|
||||
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
|
||||
help='ask for vault password')
|
||||
parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE,
|
||||
dest='vault_password_file', help="vault password file")
|
||||
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
|
||||
help='outputs a list of matching hosts; does not execute anything else')
|
||||
parser.add_option('-M', '--module-path', dest='module_path',
|
||||
help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH,
|
||||
default=None)
|
||||
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
|
||||
help="set additional variables as key=value or YAML/JSON", default=[])
|
||||
def __init__(self, args, display=None):
|
||||
"""
|
||||
Base init method for all command line programs
|
||||
"""
|
||||
|
||||
if subset_opts:
|
||||
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
|
||||
help='further limit selected hosts to an additional pattern')
|
||||
parser.add_option('-t', '--tags', dest='tags', default='all',
|
||||
help="only run plays and tasks tagged with these values")
|
||||
parser.add_option('--skip-tags', dest='skip_tags',
|
||||
help="only run plays and tasks whose tags do not match these values")
|
||||
self.args = args
|
||||
self.options = None
|
||||
self.parser = None
|
||||
self.action = None
|
||||
|
||||
if output_opts:
|
||||
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
|
||||
help='condense output')
|
||||
parser.add_option('-t', '--tree', dest='tree', default=None,
|
||||
help='log output to this directory')
|
||||
if display is None:
|
||||
self.display = Display()
|
||||
else:
|
||||
self.display = display
|
||||
|
||||
if runas_opts:
|
||||
# priv user defaults to root later on to enable detecting when this option was given here
|
||||
parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
|
||||
help='ask for sudo password (deprecated, use become)')
|
||||
parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
|
||||
help='ask for su password (deprecated, use become)')
|
||||
parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
|
||||
help="run operations with sudo (nopasswd) (deprecated, use become)")
|
||||
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
|
||||
help='desired sudo user (default=root) (deprecated, use become)')
|
||||
parser.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true',
|
||||
help='run operations with su (deprecated, use become)')
|
||||
parser.add_option('-R', '--su-user', default=None,
|
||||
help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER)
|
||||
def set_action(self):
|
||||
"""
|
||||
Get the action the user wants to execute from the sys argv list.
|
||||
"""
|
||||
for i in range(0,len(self.args)):
|
||||
arg = self.args[i]
|
||||
if arg in self.VALID_ACTIONS:
|
||||
self.action = arg
|
||||
del self.args[i]
|
||||
break
|
||||
|
||||
# consolidated privilege escalation (become)
|
||||
parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
|
||||
help="run operations with become (nopasswd implied)")
|
||||
parser.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='string',
|
||||
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
|
||||
parser.add_option('--become-user', default=None, dest='become_user', type='string',
|
||||
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
|
||||
parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
|
||||
help='ask for privilege escalation password')
|
||||
if not self.action:
|
||||
self.parser.print_help()
|
||||
raise AnsibleError("Missing required action")
|
||||
|
||||
def execute(self):
|
||||
"""
|
||||
Actually runs a child defined method using the execute_<action> pattern
|
||||
"""
|
||||
fn = getattr(self, "execute_%s" % self.action)
|
||||
fn()
|
||||
|
||||
def parse(self):
|
||||
raise Exception("Need to implement!")
|
||||
|
||||
def run(self):
|
||||
raise Exception("Need to implement!")
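set_action() pulls the first recognized VALID_ACTIONS token out of the argument list, and execute() dispatches to the matching execute_<action> method. A minimal illustrative subclass showing that contract (DemoCli and its actions are invented for the example):

class DemoCli(CLI):
    ''' illustrative subclass: each action maps to an execute_<action> method '''

    VALID_ACTIONS = ['info', 'list']

    def parse(self):
        self.set_action()    # consumes the action token from self.args

    def run(self):
        self.execute()       # dispatches to execute_info / execute_list

    def execute_info(self):
        self.display.display("info about: %s" % ", ".join(self.args))

    def execute_list(self):
        self.display.display("listing: %s" % ", ".join(self.args))

# DemoCli(['list', 'roles']).parse() sets action to 'list' and leaves ['roles'] in args.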
|
||||
|
||||
@staticmethod
|
||||
def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
|
||||
|
||||
vault_pass = None
|
||||
new_vault_pass = None
|
||||
|
||||
if ask_vault_pass:
|
||||
vault_pass = getpass.getpass(prompt="Vault password: ")
|
||||
|
||||
if ask_vault_pass and confirm_vault:
|
||||
vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
|
||||
if vault_pass != vault_pass2:
|
||||
raise errors.AnsibleError("Passwords do not match")
|
||||
|
||||
if ask_new_vault_pass:
|
||||
new_vault_pass = getpass.getpass(prompt="New Vault password: ")
|
||||
|
||||
if ask_new_vault_pass and confirm_new:
|
||||
new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
|
||||
if new_vault_pass != new_vault_pass2:
|
||||
raise errors.AnsibleError("Passwords do not match")
|
||||
|
||||
# enforce no newline chars at the end of passwords
|
||||
if vault_pass:
|
||||
vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
|
||||
if new_vault_pass:
|
||||
new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
|
||||
|
||||
return vault_pass, new_vault_pass
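Callers that only want the current vault password take the first element of the returned pair, which is exactly how the ad-hoc CLI uses it further down in this diff:

# Prompt only for the existing vault password, no confirmation, no new password.
vault_pass = CLI.ask_vault_passwords(ask_vault_pass=True,
                                     ask_new_vault_pass=False,
                                     confirm_new=False)[0]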
|
||||
|
||||
|
||||
if connect_opts:
|
||||
parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
|
||||
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
|
||||
parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
|
||||
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
|
||||
def ask_passwords(self):
|
||||
|
||||
op = self.options
|
||||
sshpass = None
|
||||
becomepass = None
|
||||
become_prompt = ''
|
||||
|
||||
if op.ask_pass:
|
||||
sshpass = getpass.getpass(prompt="SSH password: ")
|
||||
become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper()
|
||||
if sshpass:
|
||||
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
|
||||
else:
|
||||
become_prompt = "%s password: " % op.become_method.upper()
|
||||
|
||||
if op.become_ask_pass:
|
||||
becomepass = getpass.getpass(prompt=become_prompt)
|
||||
if op.ask_pass and becomepass == '':
|
||||
becomepass = sshpass
|
||||
if becomepass:
|
||||
becomepass = to_bytes(becomepass)
|
||||
|
||||
return (sshpass, becomepass)
|
||||
|
||||
|
||||
if async_opts:
|
||||
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int',
|
||||
dest='poll_interval',
|
||||
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
|
||||
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
|
||||
help='run asynchronously, failing after X seconds (default=N/A)')
|
||||
def normalize_become_options(self):
|
||||
''' this keeps backwards compatibility with sudo/su self.options '''
|
||||
self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
|
||||
self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER
|
||||
|
||||
if check_opts:
|
||||
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
|
||||
help="don't make any changes; instead, try to predict some of the changes that may occur")
|
||||
parser.add_option('--syntax-check', dest='syntax', action='store_true',
|
||||
help="perform a syntax check on the playbook, but do not execute it")
|
||||
|
||||
if diff_opts:
|
||||
parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
|
||||
help="when changing (small) files and templates, show the differences in those files; works great with --check"
|
||||
)
|
||||
|
||||
if meta_opts:
|
||||
parser.add_option('--force-handlers', dest='force_handlers', action='store_true',
|
||||
help="run handlers even if a task fails")
|
||||
parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
|
||||
help="clear the fact cache")
|
||||
|
||||
return parser
|
||||
|
||||
def version(prog):
|
||||
result = "{0} {1}".format(prog, __version__)
|
||||
gitinfo = _gitinfo()
|
||||
if gitinfo:
|
||||
result = result + " {0}".format(gitinfo)
|
||||
result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
|
||||
return result
|
||||
|
||||
def version_info(gitinfo=False):
|
||||
if gitinfo:
|
||||
# expensive call, use with care
|
||||
ansible_version_string = version('')
|
||||
else:
|
||||
ansible_version_string = __version__
|
||||
ansible_version = ansible_version_string.split()[0]
|
||||
ansible_versions = ansible_version.split('.')
|
||||
for counter in range(len(ansible_versions)):
|
||||
if ansible_versions[counter] == "":
|
||||
ansible_versions[counter] = 0
|
||||
try:
|
||||
ansible_versions[counter] = int(ansible_versions[counter])
|
||||
except:
|
||||
if self.options.become:
|
||||
pass
|
||||
if len(ansible_versions) < 3:
|
||||
for counter in range(len(ansible_versions), 3):
|
||||
ansible_versions.append(0)
|
||||
return {'string': ansible_version_string.strip(),
|
||||
'full': ansible_version,
|
||||
'major': ansible_versions[0],
|
||||
'minor': ansible_versions[1],
|
||||
'revision': ansible_versions[2]}
|
||||
elif self.options.sudo:
|
||||
self.options.become = True
|
||||
self.options.become_method = 'sudo'
|
||||
elif self.options.su:
|
||||
self.options.become = True
|
||||
options.become_method = 'su'
|
||||
|
||||
|
||||
def validate_conflicts(self):
|
||||
|
||||
op = self.options
|
||||
|
||||
# Check for vault related conflicts
|
||||
if (op.ask_vault_pass and op.vault_password_file):
|
||||
self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
|
||||
|
||||
|
||||
# Check for privilege escalation conflicts
|
||||
if (op.su or op.su_user or op.ask_su_pass) and \
|
||||
(op.sudo or op.sudo_user or op.ask_sudo_pass) or \
|
||||
(op.su or op.su_user or op.ask_su_pass) and \
|
||||
(op.become or op.become_user or op.become_ask_pass) or \
|
||||
(op.sudo or op.sudo_user or op.ask_sudo_pass) and \
|
||||
(op.become or op.become_user or op.become_ask_pass):
|
||||
|
||||
self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
|
||||
"and su arguments ('-su', '--su-user', and '--ask-su-pass') "
|
||||
"and become arguments ('--become', '--become-user', and '--ask-become-pass')"
|
||||
" are exclusive of each other")
|
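The privilege-escalation conflict test above leans on 'and' binding tighter than 'or': it fires whenever any two of the three option families are mixed. Restated with explicit grouping (the helper is illustrative, not part of the diff):

def _escalation_conflicts(op):
    # Explicit grouping of the condition in validate_conflicts() above.
    su_opts     = op.su or op.su_user or op.ask_su_pass
    sudo_opts   = op.sudo or op.sudo_user or op.ask_sudo_pass
    become_opts = op.become or op.become_user or op.become_ask_pass
    return (su_opts and sudo_opts) or (su_opts and become_opts) or (sudo_opts and become_opts)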
||||
|
||||
@staticmethod
|
||||
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False,
|
||||
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
|
||||
''' create an options parser for any ansible script '''
|
||||
|
||||
parser = SortedOptParser(usage, version=CLI.version("%prog"))
|
||||
|
||||
parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
|
||||
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
|
||||
parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count",
|
||||
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
|
||||
parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
|
||||
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
|
||||
parser.add_option('-i', '--inventory-file', dest='inventory',
|
||||
help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST,
|
||||
default=C.DEFAULT_HOST_LIST)
|
||||
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
|
||||
help='ask for connection password')
|
||||
parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
|
||||
help='use this file to authenticate the connection')
|
||||
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
|
||||
help='ask for vault password')
|
||||
parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE,
|
||||
dest='vault_password_file', help="vault password file")
|
||||
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
|
||||
help='outputs a list of matching hosts; does not execute anything else')
|
||||
parser.add_option('-M', '--module-path', dest='module_path',
|
||||
help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH,
|
||||
default=None)
|
||||
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
|
||||
help="set additional variables as key=value or YAML/JSON", default=[])
|
||||
|
||||
if subset_opts:
|
||||
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
|
||||
help='further limit selected hosts to an additional pattern')
|
||||
parser.add_option('-t', '--tags', dest='tags', default='all',
|
||||
help="only run plays and tasks tagged with these values")
|
||||
parser.add_option('--skip-tags', dest='skip_tags',
|
||||
help="only run plays and tasks whose tags do not match these values")
|
||||
|
||||
if output_opts:
|
||||
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
|
||||
help='condense output')
|
||||
parser.add_option('-t', '--tree', dest='tree', default=None,
|
||||
help='log output to this directory')
|
||||
|
||||
if runas_opts:
|
||||
# priv user defaults to root later on to enable detecting when this option was given here
|
||||
parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
|
||||
help='ask for sudo password (deprecated, use become)')
|
||||
parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
|
||||
help='ask for su password (deprecated, use become)')
|
||||
parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
|
||||
help="run operations with sudo (nopasswd) (deprecated, use become)")
|
||||
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
|
||||
help='desired sudo user (default=root) (deprecated, use become)')
|
||||
parser.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true',
|
||||
help='run operations with su (deprecated, use become)')
|
||||
parser.add_option('-R', '--su-user', default=None,
|
||||
help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER)
|
||||
|
||||
# consolidated privilege escalation (become)
|
||||
parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
|
||||
help="run operations with become (nopasswd implied)")
|
||||
parser.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='string',
|
||||
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
|
||||
parser.add_option('--become-user', default=None, dest='become_user', type='string',
|
||||
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
|
||||
parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
|
||||
help='ask for privilege escalation password')
|
||||
|
||||
|
||||
if connect_opts:
|
||||
parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
|
||||
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
|
||||
parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
|
||||
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
|
||||
|
||||
|
||||
if async_opts:
|
||||
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int',
|
||||
dest='poll_interval',
|
||||
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
|
||||
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
|
||||
help='run asynchronously, failing after X seconds (default=N/A)')
|
||||
|
||||
if check_opts:
|
||||
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
|
||||
help="don't make any changes; instead, try to predict some of the changes that may occur")
|
||||
parser.add_option('--syntax-check', dest='syntax', action='store_true',
|
||||
help="perform a syntax check on the playbook, but do not execute it")
|
||||
|
||||
if diff_opts:
|
||||
parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
|
||||
help="when changing (small) files and templates, show the differences in those files; works great with --check"
|
||||
)
|
||||
|
||||
if meta_opts:
|
||||
parser.add_option('--force-handlers', dest='force_handlers', action='store_true',
|
||||
help="run handlers even if a task fails")
|
||||
parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
|
||||
help="clear the fact cache")
|
||||
|
||||
return parser
|
||||
|
||||
@staticmethod
|
||||
def version(prog):
|
||||
result = "{0} {1}".format(prog, __version__)
|
||||
gitinfo = _gitinfo()
|
||||
if gitinfo:
|
||||
result = result + " {0}".format(gitinfo)
|
||||
result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def version_info(gitinfo=False):
|
||||
if gitinfo:
|
||||
# expensive call, use with care
|
||||
ansible_version_string = version('')
|
||||
else:
|
||||
ansible_version_string = __version__
|
||||
ansible_version = ansible_version_string.split()[0]
|
||||
ansible_versions = ansible_version.split('.')
|
||||
for counter in range(len(ansible_versions)):
|
||||
if ansible_versions[counter] == "":
|
||||
ansible_versions[counter] = 0
|
||||
try:
|
||||
ansible_versions[counter] = int(ansible_versions[counter])
|
||||
except:
|
||||
pass
|
||||
if len(ansible_versions) < 3:
|
||||
for counter in range(len(ansible_versions), 3):
|
||||
ansible_versions.append(0)
|
||||
return {'string': ansible_version_string.strip(),
|
||||
'full': ansible_version,
|
||||
'major': ansible_versions[0],
|
||||
'minor': ansible_versions[1],
|
||||
'revision': ansible_versions[2]}
|
||||
|
||||
def _git_repo_info(repo_path):
|
||||
''' returns a string containing git branch, commit id and commit date '''
|
||||
|
@@ -234,69 +380,3 @@ def _gitinfo():
|
|||
result += "\n {0}: {1}".format(submodule_path, submodule_info)
|
||||
f.close()
|
||||
return result
|
||||
|
||||
|
||||
def ask_passwords(options):
|
||||
sshpass = None
|
||||
becomepass = None
|
||||
vaultpass = None
|
||||
become_prompt = ''
|
||||
|
||||
if options.ask_pass:
|
||||
sshpass = getpass.getpass(prompt="SSH password: ")
|
||||
become_prompt = "%s password[defaults to SSH password]: " % options.become_method.upper()
|
||||
if sshpass:
|
||||
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
|
||||
else:
|
||||
become_prompt = "%s password: " % options.become_method.upper()
|
||||
|
||||
if options.become_ask_pass:
|
||||
becomepass = getpass.getpass(prompt=become_prompt)
|
||||
if options.ask_pass and becomepass == '':
|
||||
becomepass = sshpass
|
||||
if becomepass:
|
||||
becomepass = to_bytes(becomepass)
|
||||
|
||||
if options.ask_vault_pass:
|
||||
vaultpass = getpass.getpass(prompt="Vault password: ")
|
||||
if vaultpass:
|
||||
vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip()
|
||||
|
||||
return (sshpass, becomepass, vaultpass)
|
||||
|
||||
|
||||
def normalize_become_options(options):
|
||||
''' this keeps backwards compatibility with sudo/su options '''
|
||||
options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
|
||||
options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER
|
||||
|
||||
if options.become:
|
||||
pass
|
||||
elif options.sudo:
|
||||
options.become = True
|
||||
options.become_method = 'sudo'
|
||||
elif options.su:
|
||||
options.become = True
|
||||
options.become_method = 'su'
|
||||
|
||||
|
||||
def validate_conflicts(parser, options):
|
||||
|
||||
# Check for vault related conflicts
|
||||
if (options.ask_vault_pass and options.vault_password_file):
|
||||
parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
|
||||
|
||||
|
||||
# Check for privilege escalation conflicts
|
||||
if (options.su or options.su_user or options.ask_su_pass) and \
|
||||
(options.sudo or options.sudo_user or options.ask_sudo_pass) or \
|
||||
(options.su or options.su_user or options.ask_su_pass) and \
|
||||
(options.become or options.become_user or options.become_ask_pass) or \
|
||||
(options.sudo or options.sudo_user or options.ask_sudo_pass) and \
|
||||
(options.become or options.become_user or options.become_ask_pass):
|
||||
|
||||
parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
|
||||
"and su arguments ('-su', '--su-user', and '--ask-su-pass') "
|
||||
"and become arguments ('--become', '--become-user', and '--ask-become-pass')"
|
||||
" are exclusive of each other")
|
||||
|
||||
|
|
|
@@ -23,7 +23,7 @@ import textwrap
|
|||
import sys
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.errors import *
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.utils.color import stringc
|
||||
|
||||
class Display:
|
||||
|
@@ -35,6 +35,7 @@ class Display:
|
|||
# list of all deprecation messages to prevent duplicate display
|
||||
self._deprecations = {}
|
||||
self._warns = {}
|
||||
self._errors = {}
|
||||
|
||||
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False):
|
||||
msg2 = msg
|
||||
|
@@ -83,7 +84,7 @@ class Display:
|
|||
if host is None:
|
||||
self.display(msg, color='blue')
|
||||
else:
|
||||
self.display("<%s> %s" % (host, msg), color='blue')
|
||||
self.display("<%s> %s" % (host, msg), color='blue', screen_only=True)
|
||||
|
||||
def deprecated(self, msg, version, removed=False):
|
||||
''' used to print out a deprecation message.'''
|
||||
|
@@ -130,3 +131,12 @@ class Display:
|
|||
star_len = 3
|
||||
stars = "*" * star_len
|
||||
self.display("\n%s %s" % (msg, stars), color=color)
|
||||
|
||||
def error(self, msg):
|
||||
new_msg = "\n[ERROR]: %s" % msg
|
||||
wrapped = textwrap.wrap(new_msg, 79)
|
||||
new_msg = "\n".join(wrapped) + "\n"
|
||||
if new_msg not in self._errors:
|
||||
self.display(new_msg, color='red', stderr=True)
|
||||
self._errors[new_msg] = 1
|
||||
|
||||
|
|
126  v2/bin/ansible
|
@@ -40,28 +40,20 @@ from ansible.inventory import Inventory
|
|||
from ansible.parsing import DataLoader
|
||||
from ansible.parsing.splitter import parse_kv
|
||||
from ansible.playbook.play import Play
|
||||
from ansible.utils.display import Display
|
||||
from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords
|
||||
from ansible.utils.cli import CLI
|
||||
from ansible.utils.display import Display
|
||||
from ansible.utils.vault import read_vault_file
|
||||
from ansible.vars import VariableManager
|
||||
|
||||
########################################################
|
||||
|
||||
class Cli(object):
|
||||
''' code behind bin/ansible '''
|
||||
|
||||
def __init__(self, display=None):
|
||||
|
||||
if display is None:
|
||||
self.display = Display()
|
||||
else:
|
||||
self.display = display
|
||||
class AdHocCli(CLI):
|
||||
''' code behind ansible ad-hoc cli'''
|
||||
|
||||
def parse(self):
|
||||
''' create an options parser for bin/ansible '''
|
||||
|
||||
parser = base_parser(
|
||||
self.parser = CLI.base_parser(
|
||||
usage='%prog <host-pattern> [options]',
|
||||
runas_opts=True,
|
||||
async_opts=True,
|
||||
|
@@ -71,102 +63,110 @@ class Cli(object):
|
|||
)
|
||||
|
||||
# options unique to ansible ad-hoc
|
||||
parser.add_option('-a', '--args', dest='module_args',
|
||||
self.parser.add_option('-a', '--args', dest='module_args',
|
||||
help="module arguments", default=C.DEFAULT_MODULE_ARGS)
|
||||
parser.add_option('-m', '--module-name', dest='module_name',
|
||||
self.parser.add_option('-m', '--module-name', dest='module_name',
|
||||
help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
|
||||
default=C.DEFAULT_MODULE_NAME)
|
||||
|
||||
options, args = parser.parse_args()
|
||||
self.options, self.args = self.parser.parse_args()
|
||||
|
||||
if len(args) == 0 or len(args) > 1:
|
||||
parser.print_help()
|
||||
if len(self.args) != 1:
|
||||
self.parser.print_help()
|
||||
sys.exit(1)
|
||||
|
||||
display.verbosity = options.verbosity
|
||||
validate_conflicts(parser,options)
|
||||
self.display.verbosity = self.options.verbosity
|
||||
self.validate_conflicts()
|
||||
|
||||
return (options, args)
|
||||
return True
|
||||
|
||||
# ----------------------------------------------
|
||||
|
||||
def run(self, options, args):
|
||||
def run(self):
|
||||
''' use Runner lib to do SSH things '''
|
||||
|
||||
pattern = args[0]
|
||||
# only thing left should be host pattern
|
||||
pattern = self.args[0]
|
||||
|
||||
if options.connection == "local":
|
||||
options.ask_pass = False
|
||||
# ignore connection password cause we are local
|
||||
if self.options.connection == "local":
|
||||
self.options.ask_pass = False
|
||||
|
||||
sshpass = None
|
||||
becomepass = None
|
||||
vault_pass = None
|
||||
|
||||
normalize_become_options(options)
|
||||
(sshpass, becomepass, vault_pass) = ask_passwords(options)
|
||||
self.normalize_become_options()
|
||||
(sshpass, becomepass) = self.ask_passwords()
|
||||
passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }
|
||||
|
||||
if options.vault_password_file:
|
||||
# read vault_pass from a file
|
||||
vault_pass = read_vault_file(options.vault_password_file)
|
||||
if self.options.vault_password_file:
|
||||
# read vault_pass from a file
|
||||
vault_pass = read_vault_file(self.options.vault_password_file)
|
||||
elif self.options.ask_vault_pass:
|
||||
vault_pass = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0]
|
||||
|
||||
loader = DataLoader(vault_password=vault_pass)
|
||||
variable_manager = VariableManager()
|
||||
|
||||
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=options.inventory)
|
||||
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
|
||||
|
||||
hosts = inventory.list_hosts(pattern)
|
||||
if len(hosts) == 0:
|
||||
d = Display()
|
||||
d.warning("provided hosts list is empty, only localhost is available")
|
||||
self.display.warning("provided hosts list is empty, only localhost is available")
|
||||
|
||||
if options.listhosts:
|
||||
if self.options.listhosts:
|
||||
for host in hosts:
|
||||
self.display.display(' %s' % host.name)
|
||||
sys.exit(0)
|
||||
return 0
|
||||
|
||||
if ((options.module_name == 'command' or options.module_name == 'shell') and not options.module_args):
|
||||
raise AnsibleError("No argument passed to %s module" % options.module_name)
|
||||
if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args:
|
||||
raise AnsibleError("No argument passed to %s module" % self.options.module_name)
|
||||
|
||||
# FIXME: async support needed
|
||||
#if options.seconds:
|
||||
#TODO: implement async support
|
||||
#if self.options.seconds:
|
||||
# callbacks.display("background launch...\n\n", color='cyan')
|
||||
# results, poller = runner.run_async(options.seconds)
|
||||
# results = self.poll_while_needed(poller, options)
|
||||
# results, poller = runner.run_async(self.options.seconds)
|
||||
# results = self.poll_while_needed(poller)
|
||||
#else:
|
||||
# results = runner.run()
|
||||
|
||||
# create a pseudo-play to execute the specified module via a single task
|
||||
play_ds = dict(
|
||||
name = "Ansible Ad-Hoc",
|
||||
hosts = pattern,
|
||||
gather_facts = 'no',
|
||||
tasks = [
|
||||
dict(action=dict(module=options.module_name, args=parse_kv(options.module_args))),
|
||||
]
|
||||
tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args))), ]
|
||||
)
|
||||
|
||||
play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)
|
||||
|
||||
# now create a task queue manager to execute the play
|
||||
try:
|
||||
display = Display()
|
||||
tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=passwords, stdout_callback='minimal')
|
||||
tqm = TaskQueueManager(
|
||||
inventory=inventory,
|
||||
callback='minimal',
|
||||
variable_manager=variable_manager,
|
||||
loader=loader,
|
||||
display=self.display,
|
||||
options=self.options,
|
||||
passwords=passwords,
|
||||
stdout_callback='minimal',
|
||||
)
|
||||
result = tqm.run(play)
|
||||
tqm.cleanup()
|
||||
except AnsibleError:
|
||||
tqm.cleanup()
|
||||
raise
|
||||
finally:
|
||||
if tqm:
|
||||
tqm.cleanup()
|
||||
|
||||
return result
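For readers tracing the refactor, here is roughly what the pseudo-play built in run() looks like for an invocation such as ansible webservers -m copy -a 'src=/etc/hosts dest=/tmp/hosts' (a sketch; the host pattern and module arguments are invented, and parse_kv is what turns the -a string into the args dict):

    play_ds = {
        'name': 'Ansible Ad-Hoc',
        'hosts': 'webservers',
        'gather_facts': 'no',
        'tasks': [
            # parse_kv('src=/etc/hosts dest=/tmp/hosts') yields the args dict below
            {'action': {'module': 'copy',
                        'args': {'src': '/etc/hosts', 'dest': '/tmp/hosts'}}},
        ],
    }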
|
||||
|
||||
# ----------------------------------------------
|
||||
|
||||
def poll_while_needed(self, poller, options):
|
||||
def poll_while_needed(self, poller):
|
||||
''' summarize results from Runner '''
|
||||
|
||||
# BACKGROUND POLL LOGIC when -B and -P are specified
|
||||
if options.seconds and options.poll_interval > 0:
|
||||
poller.wait(options.seconds, options.poll_interval)
|
||||
if self.options.seconds and self.options.poll_interval > 0:
|
||||
poller.wait(self.options.seconds, self.options.poll_interval)
|
||||
|
||||
return poller.results
|
||||
|
||||
|
@@ -176,15 +176,17 @@ class Cli(object):
|
|||
if __name__ == '__main__':
|
||||
|
||||
display = Display()
|
||||
#display.display(" ".join(sys.argv))
|
||||
|
||||
try:
|
||||
cli = Cli(display=display)
|
||||
(options, args) = cli.parse()
|
||||
sys.exit(cli.run(options, args))
|
||||
cli = AdHocCli(sys.argv, display=display)
|
||||
cli.parse()
|
||||
sys.exit(cli.run())
|
||||
except AnsibleOptionsError as e:
|
||||
cli.parser.print_help()
|
||||
display.display(str(e), stderr=True, color='red')
|
||||
sys.exit(1)
|
||||
except AnsibleError as e:
|
||||
display.display("[ERROR]: %s" % e, color='red', stderr=True)
|
||||
sys.exit(1)
|
||||
display.display(str(e), stderr=True, color='red')
|
||||
sys.exit(2)
|
||||
except KeyboardInterrupt:
|
||||
display.display("[ERROR]: interrupted", color='red', stderr=True)
|
||||
sys.exit(1)
|
||||
display.error("interrupted")
|
||||
sys.exit(4)
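To summarize the shape all of the rewritten bin scripts now share (construct with argv, parse(), then run(), with distinct exit codes per error class), here is a self-contained sketch; MiniCLI is a hypothetical stand-in, not the actual ansible.utils.cli.CLI class:

    import sys
    from optparse import OptionParser

    class MiniCLI(object):
        # hypothetical stand-in illustrating the parse()/run() pattern used above

        def __init__(self, args):
            self.args = args[1:]
            self.options = None
            self.parser = None

        def parse(self):
            self.parser = OptionParser(usage='%prog <host-pattern> [options]')
            self.parser.add_option('-v', '--verbose', dest='verbosity',
                                   default=0, action='count')
            self.options, self.args = self.parser.parse_args(self.args)
            if len(self.args) != 1:
                self.parser.print_help()
                sys.exit(1)
            return True

        def run(self):
            # real subclasses (ad-hoc, galaxy, playbook, vault) do their work here
            print('pattern: %s' % self.args[0])
            return 0

    if __name__ == '__main__':
        cli = MiniCLI(sys.argv)
        cli.parse()
        sys.exit(cli.run())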
|
||||
|
|
518
v2/bin/ansible-galaxy
Executable file
|
@@ -0,0 +1,518 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
########################################################################
|
||||
#
|
||||
# (C) 2013, James Cammarata <jcammarata@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
########################################################################
|
||||
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
import os.path
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tarfile
|
||||
import tempfile
|
||||
import urllib
|
||||
import urllib2
|
||||
import yaml
|
||||
|
||||
from collections import defaultdict
|
||||
from distutils.version import LooseVersion
|
||||
from jinja2 import Environment
|
||||
from optparse import OptionParser
|
||||
|
||||
import ansible.constants as C
|
||||
import ansible.utils
|
||||
import ansible.galaxy
|
||||
from ansible.errors import AnsibleError, AnsibleOptionsError
|
||||
from ansible.galaxy import Galaxy
|
||||
from ansible.galaxy.api import GalaxyAPI
|
||||
from ansible.galaxy.role import GalaxyRole
|
||||
from ansible.playbook.role.requirement import RoleRequirement
|
||||
from ansible.utils.display import Display
|
||||
from ansible.utils.cli import CLI
|
||||
|
||||
class GalaxyCLI(CLI):
|
||||
|
||||
VALID_ACTIONS = ("init", "info", "install", "list", "remove")
|
||||
SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
|
||||
|
||||
def __init__(self, args, display=None):
|
||||
|
||||
self.api = None
|
||||
self.galaxy = None
|
||||
super(GalaxyCLI, self).__init__(args, display)
|
||||
|
||||
def parse(self):
|
||||
''' create an options parser for bin/ansible-galaxy '''
|
||||
|
||||
usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS)
|
||||
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
|
||||
OptionParser.format_epilog = lambda self, formatter: self.epilog
|
||||
parser = OptionParser(usage=usage, epilog=epilog)
|
||||
|
||||
self.parser = parser
|
||||
self.set_action()
|
||||
|
||||
# verbose
|
||||
self.parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count",
|
||||
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
|
||||
|
||||
# options specific to actions
|
||||
if self.action == "info":
|
||||
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
|
||||
elif self.action == "init":
|
||||
self.parser.set_usage("usage: %prog init [options] role_name")
|
||||
self.parser.add_option(
|
||||
'-p', '--init-path', dest='init_path', default="./",
|
||||
help='The path in which the skeleton role will be created. '
|
||||
'The default is the current working directory.')
|
||||
self.parser.add_option(
|
||||
'--offline', dest='offline', default=False, action='store_true',
|
||||
help="Don't query the galaxy API when creating roles")
|
||||
elif self.action == "install":
|
||||
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
|
||||
self.parser.add_option(
|
||||
'-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
|
||||
help='Ignore errors and continue with the next specified role.')
|
||||
self.parser.add_option(
|
||||
'-n', '--no-deps', dest='no_deps', action='store_true', default=False,
|
||||
help='Don\'t download roles listed as dependencies')
|
||||
self.parser.add_option(
|
||||
'-r', '--role-file', dest='role_file',
|
||||
help='A file containing a list of roles to be imported')
|
||||
elif self.action == "remove":
|
||||
self.parser.set_usage("usage: %prog remove role1 role2 ...")
|
||||
elif self.action == "list":
|
||||
self.parser.set_usage("usage: %prog list [role_name]")
|
||||
|
||||
# options that apply to more than one action
|
||||
if self.action != "init":
|
||||
self.parser.add_option(
|
||||
'-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
|
||||
help='The path to the directory containing your roles. '
|
||||
'The default is the roles_path configured in your '
|
||||
'ansible.cfg file (/etc/ansible/roles if not configured)')
|
||||
|
||||
if self.action in ("info","init","install"):
|
||||
self.parser.add_option( '-s', '--server', dest='api_server', default="https://galaxy.ansible.com",
|
||||
help='The API server destination')
|
||||
|
||||
if self.action in ("init","install"):
|
||||
self.parser.add_option(
|
||||
'-f', '--force', dest='force', action='store_true', default=False,
|
||||
help='Force overwriting an existing role')
|
||||
|
||||
# get options, args and galaxy object
|
||||
self.options, self.args = self.parser.parse_args()
|
||||
self.display.verbosity = self.options.verbosity
|
||||
self.galaxy = Galaxy(self.options, self.display)
|
||||
|
||||
return True
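The OptionParser.format_epilog override above is a small workaround: optparse normally re-wraps the epilog and collapses its newlines, so returning it verbatim keeps the "See <command> --help" hint formatted as written. A minimal standalone demonstration (not Ansible code):

    from optparse import OptionParser

    epilog = "\nSee 'ansible-galaxy <command> --help' for more information on a specific command.\n\n"

    # the default implementation re-wraps the epilog; returning it as-is keeps the blank lines
    OptionParser.format_epilog = lambda self, formatter: self.epilog

    parser = OptionParser(usage='%prog [action] [options]', epilog=epilog)
    parser.print_help()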
|
||||
|
||||
def run(self):
|
||||
|
||||
# if not offline, connect to the galaxy api
|
||||
if self.action in ("info","install") or (self.action == 'init' and not self.options.offline):
|
||||
api_server = self.options.api_server
|
||||
self.api = GalaxyAPI(self.galaxy, api_server)
|
||||
if not self.api:
|
||||
raise AnsibleError("The API server (%s) is not responding, please try again later." % api_server)
|
||||
|
||||
self.execute()
|
||||
|
||||
def get_opt(self, k, defval=""):
|
||||
"""
|
||||
Returns an option from an Optparse values instance.
|
||||
"""
|
||||
try:
|
||||
data = getattr(self.options, k)
|
||||
except:
|
||||
return defval
|
||||
if k == "roles_path":
|
||||
if os.pathsep in data:
|
||||
data = data.split(os.pathsep)[0]
|
||||
return data
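The roles_path special case in get_opt() keeps only the first entry when the configured value is a pathsep-separated list; for example (illustrative values):

    import os

    roles_path = '/etc/ansible/roles' + os.pathsep + '/usr/share/ansible/roles'
    if os.pathsep in roles_path:
        roles_path = roles_path.split(os.pathsep)[0]
    # roles_path is now '/etc/ansible/roles' (os.pathsep is ':' on POSIX, ';' on Windows)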
|
||||
|
||||
def exit_without_ignore(self, rc=1):
|
||||
"""
|
||||
Exits with the specified return code unless the
|
||||
option --ignore-errors was specified
|
||||
"""
|
||||
if not self.get_opt("ignore_errors", False):
|
||||
self.display.error('- you can use --ignore-errors to skip failed roles and finish processing the list.')
|
||||
return rc
|
||||
|
||||
def execute_init(self):
|
||||
"""
|
||||
Executes the init action, which creates the skeleton framework
|
||||
of a role that complies with the galaxy metadata format.
|
||||
"""
|
||||
|
||||
init_path = self.get_opt('init_path', './')
|
||||
force = self.get_opt('force', False)
|
||||
offline = self.get_opt('offline', False)
|
||||
|
||||
role_name = self.args.pop(0).strip()
|
||||
if role_name == "":
|
||||
raise AnsibleOptionsError("- no role name specified for init")
|
||||
role_path = os.path.join(init_path, role_name)
|
||||
if os.path.exists(role_path):
|
||||
if os.path.isfile(role_path):
|
||||
raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
|
||||
elif not force:
|
||||
raise AnsibleError("- the directory %s already exists." % role_path + \
|
||||
"you can use --force to re-initialize this directory,\n" + \
|
||||
"however it will reset any main.yml files that may have\n" + \
|
||||
"been modified there already.")
|
||||
|
||||
# create the default README.md
|
||||
if not os.path.exists(role_path):
|
||||
os.makedirs(role_path)
|
||||
readme_path = os.path.join(role_path, "README.md")
|
||||
f = open(readme_path, "wb")
|
||||
f.write(self.galaxy.default_readme)
|
||||
f.close()
|
||||
|
||||
for dir in GalaxyRole.ROLE_DIRS:
|
||||
dir_path = os.path.join(init_path, role_name, dir)
|
||||
main_yml_path = os.path.join(dir_path, 'main.yml')
|
||||
# create the directory if it doesn't exist already
|
||||
if not os.path.exists(dir_path):
|
||||
os.makedirs(dir_path)
|
||||
|
||||
# now create the main.yml file for that directory
|
||||
if dir == "meta":
|
||||
# create a skeleton meta/main.yml with a valid galaxy_info
|
||||
# datastructure in place, plus with all of the available
|
||||
# tags/platforms included (but commented out) and the
|
||||
# dependencies section
|
||||
platforms = []
|
||||
if not offline and self.api:
|
||||
platforms = self.api.get_list("platforms") or []
|
||||
categories = []
|
||||
if not offline and self.api:
|
||||
categories = self.api.get_list("categories") or []
|
||||
|
||||
# group the list of platforms from the api based
|
||||
# on their names, with the release field being
|
||||
# appended to a list of versions
|
||||
platform_groups = defaultdict(list)
|
||||
for platform in platforms:
|
||||
platform_groups[platform['name']].append(platform['release'])
|
||||
platform_groups[platform['name']].sort()
|
||||
|
||||
inject = dict(
|
||||
author = 'your name',
|
||||
company = 'your company (optional)',
|
||||
license = 'license (GPLv2, CC-BY, etc)',
|
||||
issue_tracker_url = 'http://example.com/issue/tracker',
|
||||
min_ansible_version = '1.2',
|
||||
platforms = platform_groups,
|
||||
categories = categories,
|
||||
)
|
||||
rendered_meta = Environment().from_string(self.galaxy.default_meta).render(inject)
|
||||
f = open(main_yml_path, 'w')
|
||||
f.write(rendered_meta)
|
||||
f.close()
|
||||
pass
|
||||
elif dir not in ('files','templates'):
|
||||
# just write a (mostly) empty YAML file for main.yml
|
||||
f = open(main_yml_path, 'w')
|
||||
f.write('---\n# %s file for %s\n' % (dir,role_name))
|
||||
f.close()
|
||||
self.display.display("- %s was created successfully" % role_name)
|
||||
|
||||
def execute_info(self):
|
||||
"""
|
||||
Executes the info action. This action prints out detailed
|
||||
information about an installed role as well as info available
|
||||
from the galaxy API.
|
||||
"""
|
||||
|
||||
if len(self.args) == 0:
|
||||
# the user needs to specify a role
|
||||
raise AnsibleOptionsError("- you must specify a user/role name")
|
||||
|
||||
roles_path = self.get_opt("roles_path")
|
||||
|
||||
for role in self.args:
|
||||
|
||||
role_info = {}
|
||||
gr = GalaxyRole(self.galaxy, role)
|
||||
#self.galaxy.add_role(gr)
|
||||
|
||||
install_info = gr.install_info
|
||||
if install_info:
|
||||
if 'version' in install_info:
|
||||
install_info['installed_version'] = install_info['version']
|
||||
del install_info['version']
|
||||
role_info.update(install_info)
|
||||
|
||||
remote_data = False
|
||||
if self.api:
|
||||
remote_data = self.api.lookup_role_by_name(role, False)
|
||||
|
||||
if remote_data:
|
||||
role_info.update(remote_data)
|
||||
|
||||
if gr.metadata:
|
||||
role_info.update(gr.metadata)
|
||||
|
||||
req = RoleRequirement()
|
||||
__, __, role_spec = req.parse({'role': role})
|
||||
if role_spec:
|
||||
role_info.update(role_spec)
|
||||
|
||||
if role_info:
|
||||
self.display.display("- %s:" % (role))
|
||||
for k in sorted(role_info.keys()):
|
||||
|
||||
if k in self.SKIP_INFO_KEYS:
|
||||
continue
|
||||
|
||||
if isinstance(role_info[k], dict):
|
||||
self.display.display("\t%s: " % (k))
|
||||
for key in sorted(role_info[k].keys()):
|
||||
if key in self.SKIP_INFO_KEYS:
|
||||
continue
|
||||
self.display.display("\t\t%s: %s" % (key, role_info[k][key]))
|
||||
else:
|
||||
self.display.display("\t%s: %s" % (k, role_info[k]))
|
||||
else:
|
||||
self.display.display("- the role %s was not found" % role)
|
||||
|
||||
def execute_install(self):
|
||||
"""
|
||||
Executes the installation action. The args list contains the
|
||||
roles to be installed, unless -f was specified. The list of roles
|
||||
can be a name (which will be downloaded via the galaxy API and github),
|
||||
or it can be a local .tar.gz file.
|
||||
"""
|
||||
|
||||
role_file = self.get_opt("role_file", None)
|
||||
|
||||
if len(self.args) == 0 and role_file is None:
|
||||
# the user needs to specify either --role-file
# or a single user/role name
|
||||
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
|
||||
elif len(self.args) == 1 and role_file is not None:
|
||||
# using a role file is mutually exclusive with specifying
|
||||
# the role name on the command line
|
||||
raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")
|
||||
|
||||
no_deps = self.get_opt("no_deps", False)
|
||||
roles_path = self.get_opt("roles_path")
|
||||
|
||||
roles_done = []
|
||||
roles_left = []
|
||||
role_name = self.args.pop(0).strip()
|
||||
|
||||
gr = GalaxyRole(self.galaxy, role_name)
|
||||
if role_file:
|
||||
f = open(role_file, 'r')
|
||||
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
|
||||
roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f))
|
||||
else:
|
||||
# roles listed in a file, one per line
|
||||
for rname in f.readlines():
|
||||
roles_left.append(GalaxyRole(self.galaxy, rname))
|
||||
f.close()
|
||||
else:
|
||||
# roles were specified directly, so we'll just go out and grab them
|
||||
# (and their dependencies, unless the user doesn't want us to).
|
||||
for rname in self.args:
|
||||
roles_left.append(GalaxyRole(self.galaxy, rname))
|
||||
|
||||
while len(roles_left) > 0:
|
||||
# query the galaxy API for the role data
|
||||
role_data = None
|
||||
role = roles_left.pop(0)
|
||||
role_src = role.src
|
||||
role_scm = role.scm
|
||||
role_path = role.path
|
||||
|
||||
if role_path:
|
||||
self.options.roles_path = role_path
|
||||
else:
|
||||
self.options.roles_path = roles_path
|
||||
|
||||
tmp_file = None
|
||||
if role_src and os.path.isfile(role_src):
|
||||
# installing a local tar.gz
|
||||
tmp_file = role_src
|
||||
else:
|
||||
if role_scm:
|
||||
# create tar file from scm url
|
||||
tmp_file = scm_archive_role(role_scm, role_src, role.version, role.name)
|
||||
if role_src:
|
||||
if '://' in role_src:
|
||||
# just download a URL - version will probably be in the URL
|
||||
tmp_file = gr.fetch()
|
||||
else:
|
||||
role_data = self.api.lookup_role_by_name(role_src)
|
||||
if not role_data:
|
||||
self.display.warning("- sorry, %s was not found on %s." % (role_src, self.options.api_server))
|
||||
self.exit_without_ignore()
|
||||
continue
|
||||
|
||||
role_versions = self.api.fetch_role_related('versions', role_data['id'])
|
||||
if not role.version:
|
||||
# convert the version names to LooseVersion objects
|
||||
# and sort them to get the latest version. If there
|
||||
# are no versions in the list, we'll grab the head
|
||||
# of the master branch
|
||||
if len(role_versions) > 0:
|
||||
loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
|
||||
loose_versions.sort()
|
||||
role["version"] = str(loose_versions[-1])
|
||||
else:
|
||||
role["version"] = 'master'
|
||||
elif role.version != 'master':
|
||||
if role_versions and role.version not in [a.get('name', None) for a in role_versions]:
|
||||
self.display.warning('role is %s' % role)
|
||||
self.display.warning("- the specified version (%s) was not found in the list of available versions (%s)." % (role.version, role_versions))
|
||||
self.exit_without_ignore()
|
||||
continue
|
||||
|
||||
# download the role. if --no-deps was specified, we stop here,
|
||||
# otherwise we recursively grab roles and all of their deps.
|
||||
tmp_file = gr.fetch(role_data)
|
||||
installed = False
|
||||
if tmp_file:
|
||||
installed = install_role(role.name, role.version, tmp_file, self.options)
|
||||
# we're done with the temp file, clean it up
|
||||
if tmp_file != role_src:
|
||||
os.unlink(tmp_file)
|
||||
# install dependencies, if we want them
|
||||
|
||||
# this should use the new role dependencies code
|
||||
#if not no_deps and installed:
|
||||
# if not role_data:
|
||||
# role_data = gr.get_metadata(role.get("name"), options)
|
||||
# role_dependencies = role_data['dependencies']
|
||||
# else:
|
||||
# role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id'])
|
||||
# for dep in role_dependencies:
|
||||
# if isinstance(dep, basestring):
|
||||
# dep = ansible.utils.role_spec_parse(dep)
|
||||
# else:
|
||||
# dep = ansible.utils.role_yaml_parse(dep)
|
||||
# if not get_role_metadata(dep["name"], options):
|
||||
# if dep not in roles_left:
|
||||
# print '- adding dependency: %s' % dep["name"]
|
||||
# roles_left.append(dep)
|
||||
# else:
|
||||
# print '- dependency %s already pending installation.' % dep["name"]
|
||||
# else:
|
||||
# print '- dependency %s is already installed, skipping.' % dep["name"]
|
||||
|
||||
if not tmp_file or not installed:
|
||||
self.display.warning("- %s was NOT installed successfully." % role.name)
|
||||
self.exit_without_ignore()
|
||||
return 0
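The version selection above relies on LooseVersion giving a numeric-aware sort, which a plain string sort would not; for example (made-up version names):

    from distutils.version import LooseVersion

    role_versions = [{'name': '1.2'}, {'name': '1.10'}, {'name': '1.9'}]

    loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
    loose_versions.sort()

    latest = str(loose_versions[-1])
    # latest == '1.10'; sorted(['1.2', '1.10', '1.9'])[-1] would have picked '1.9'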
|
||||
|
||||
def execute_remove(self):
|
||||
"""
|
||||
Executes the remove action. The args list contains the list
|
||||
of roles to be removed. This list can contain more than one role.
|
||||
"""
|
||||
|
||||
if len(self.args) == 0:
|
||||
raise AnsibleOptionsError('- you must specify at least one role to remove.')
|
||||
|
||||
for role_name in self.args:
|
||||
role = GalaxyRole(self.galaxy, role_name)
|
||||
try:
|
||||
if role.remove():
|
||||
self.display.display('- successfully removed %s' % role_name)
|
||||
else:
|
||||
self.display.display('- %s is not installed, skipping.' % role_name)
|
||||
except Exception as e:
|
||||
raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))
|
||||
|
||||
return 0
|
||||
|
||||
def execute_list(self):
|
||||
"""
|
||||
Executes the list action. The args list can contain zero
|
||||
or one role. If one is specified, only that role will be
|
||||
shown, otherwise all roles in the specified directory will
|
||||
be shown.
|
||||
"""
|
||||
|
||||
if len(self.args) > 1:
|
||||
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
|
||||
|
||||
if len(self.args) == 1:
|
||||
# show only the requested role, if it exists
|
||||
gr = GalaxyRole(self.galaxy, self.args[0])
|
||||
if gr.metadata:
|
||||
install_info = gr.install_info
|
||||
version = None
|
||||
if install_info:
|
||||
version = install_info.get("version", None)
|
||||
if not version:
|
||||
version = "(unknown version)"
|
||||
# show some more info about single roles here
|
||||
self.display.display("- %s, %s" % (self.name, version))
|
||||
else:
|
||||
self.display.display("- the role %s was not found" % self.name)
|
||||
else:
|
||||
# show all valid roles in the roles_path directory
|
||||
roles_path = self.get_opt('roles_path')
|
||||
roles_path = os.path.expanduser(roles_path)
|
||||
if not os.path.exists(roles_path):
|
||||
raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path)
|
||||
elif not os.path.isdir(roles_path):
|
||||
raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path)
|
||||
path_files = os.listdir(roles_path)
|
||||
for path_file in path_files:
|
||||
gr = GalaxyRole(self.galaxy, path_file)
if gr.metadata:
|
||||
install_info = gr.install_info
|
||||
version = None
|
||||
if install_info:
|
||||
version = install_info.get("version", None)
|
||||
if not version:
|
||||
version = "(unknown version)"
|
||||
self.display.display("- %s, %s" % (path_file, version))
|
||||
return 0
|
||||
|
||||
#-------------------------------------------------------------------------------------
|
||||
# The main entry point
|
||||
#-------------------------------------------------------------------------------------
|
||||
if __name__ == '__main__':
|
||||
|
||||
display = Display()
|
||||
try:
|
||||
cli = GalaxyCLI(sys.argv, display=display)
|
||||
cli.parse()
|
||||
sys.exit(cli.run())
|
||||
except AnsibleOptionsError as e:
|
||||
cli.parser.print_help()
|
||||
display.display(str(e), stderr=True, color='red')
|
||||
sys.exit(1)
|
||||
except AnsibleError as e:
|
||||
display.display(str(e), stderr=True, color='red')
|
||||
sys.exit(2)
|
||||
except KeyboardInterrupt:
|
||||
display.error("interrupted")
|
||||
sys.exit(4)
|
|
@@ -44,7 +44,7 @@ from ansible.parsing import DataLoader
|
|||
from ansible.parsing.splitter import parse_kv
|
||||
from ansible.playbook import Playbook
|
||||
from ansible.playbook.task import Task
|
||||
from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords
|
||||
from ansible.utils.cli import CLI
|
||||
from ansible.utils.display import Display
|
||||
from ansible.utils.unicode import to_unicode
|
||||
from ansible.utils.vars import combine_vars
|
||||
|
@@ -53,146 +53,162 @@ from ansible.vars import VariableManager
|
|||
|
||||
#---------------------------------------------------------------------------------------------------
|
||||
|
||||
def main(display, args):
|
||||
''' run ansible-playbook operations '''
|
||||
class PlaybookCLI(CLI):
|
||||
''' code behind ansible-playbook cli '''
|
||||
|
||||
# create parser for CLI options
|
||||
parser = base_parser(
|
||||
usage = "%prog playbook.yml",
|
||||
connect_opts=True,
|
||||
meta_opts=True,
|
||||
runas_opts=True,
|
||||
subset_opts=True,
|
||||
check_opts=True,
|
||||
diff_opts=True,
|
||||
)
|
||||
def parse(self):
|
||||
|
||||
# ansible playbook specific opts
|
||||
parser.add_option('--list-tasks', dest='listtasks', action='store_true',
|
||||
help="list all tasks that would be executed")
|
||||
parser.add_option('--step', dest='step', action='store_true',
|
||||
help="one-step-at-a-time: confirm each task before running")
|
||||
parser.add_option('--start-at-task', dest='start_at',
|
||||
help="start the playbook at the task matching this name")
|
||||
parser.add_option('--list-tags', dest='listtags', action='store_true',
|
||||
help="list all available tags")
|
||||
# create parser for CLI options
|
||||
parser = CLI.base_parser(
|
||||
usage = "%prog playbook.yml",
|
||||
connect_opts=True,
|
||||
meta_opts=True,
|
||||
runas_opts=True,
|
||||
subset_opts=True,
|
||||
check_opts=True,
|
||||
diff_opts=True,
|
||||
)
|
||||
|
||||
options, args = parser.parse_args(args)
|
||||
# ansible playbook specific opts
|
||||
parser.add_option('--list-tasks', dest='listtasks', action='store_true',
|
||||
help="list all tasks that would be executed")
|
||||
parser.add_option('--step', dest='step', action='store_true',
|
||||
help="one-step-at-a-time: confirm each task before running")
|
||||
parser.add_option('--start-at-task', dest='start_at',
|
||||
help="start the playbook at the task matching this name")
|
||||
parser.add_option('--list-tags', dest='listtags', action='store_true',
|
||||
help="list all available tags")
|
||||
|
||||
if len(args) == 0:
|
||||
parser.print_help(file=sys.stderr)
|
||||
return 1
|
||||
self.options, self.args = parser.parse_args()
|
||||
|
||||
display.verbosity = options.verbosity
|
||||
validate_conflicts(parser,options)
|
||||
if len(self.args) == 0:
|
||||
parser.print_help(file=sys.stderr)
|
||||
raise AnsibleError("You must specify a playbook file to run")
|
||||
|
||||
# Note: slightly wrong, this is written so that implicit localhost
|
||||
# Manage passwords
|
||||
sshpass = None
|
||||
becomepass = None
|
||||
vault_pass = None
|
||||
self.parser = parser
|
||||
|
||||
# don't deal with privilege escalation when we don't need to
|
||||
if not options.listhosts and not options.listtasks and not options.listtags:
|
||||
normalize_become_options(options)
|
||||
(sshpass, becomepass, vault_pass) = ask_passwords(options)
|
||||
passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }
|
||||
self.display.verbosity = self.options.verbosity
|
||||
self.validate_conflicts()
|
||||
|
||||
if options.vault_password_file:
|
||||
# read vault_pass from a file
|
||||
vault_pass = read_vault_file(options.vault_password_file)
|
||||
def run(self):
|
||||
|
||||
loader = DataLoader(vault_password=vault_pass)
|
||||
# Note: slightly wrong, this is written so that implicit localhost
|
||||
# Manage passwords
|
||||
sshpass = None
|
||||
becomepass = None
|
||||
vault_pass = None
|
||||
passwords = {}
|
||||
|
||||
extra_vars = {}
|
||||
for extra_vars_opt in options.extra_vars:
|
||||
extra_vars_opt = to_unicode(extra_vars_opt, errors='strict')
|
||||
if extra_vars_opt.startswith(u"@"):
|
||||
# Argument is a YAML file (JSON is a subset of YAML)
|
||||
data = loader.load_from_file(extra_vars_opt[1:])
|
||||
elif extra_vars_opt and extra_vars_opt[0] in u'[{':
|
||||
# Arguments as YAML
|
||||
data = loader.load(extra_vars_opt)
|
||||
# don't deal with privilege escalation or passwords when we don't need to
|
||||
if not self.options.listhosts and not self.options.listtasks and not self.options.listtags:
|
||||
self.normalize_become_options()
|
||||
(sshpass, becomepass) = self.ask_passwords()
|
||||
passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }
|
||||
|
||||
if self.options.vault_password_file:
|
||||
# read vault_pass from a file
|
||||
vault_pass = read_vault_file(self.options.vault_password_file)
|
||||
elif self.options.ask_vault_pass:
|
||||
vault_pass = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0]
|
||||
|
||||
loader = DataLoader(vault_password=vault_pass)
|
||||
|
||||
extra_vars = {}
|
||||
for extra_vars_opt in self.options.extra_vars:
|
||||
extra_vars_opt = to_unicode(extra_vars_opt, errors='strict')
|
||||
if extra_vars_opt.startswith(u"@"):
|
||||
# Argument is a YAML file (JSON is a subset of YAML)
|
||||
data = loader.load_from_file(extra_vars_opt[1:])
|
||||
elif extra_vars_opt and extra_vars_opt[0] in u'[{':
|
||||
# Arguments as YAML
|
||||
data = loader.load(extra_vars_opt)
|
||||
else:
|
||||
# Arguments as Key-value
|
||||
data = parse_kv(extra_vars_opt)
|
||||
extra_vars = combine_vars(extra_vars, data)
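The net effect of the loop above is that later -e options win on key conflicts; combine_vars is approximated here with a plain dict merge (which matches the default hash_behaviour=replace):

    extra_vars = {}
    cli_values = [
        {'env': 'prod', 'region': 'us-east-1'},   # -e 'env=prod region=us-east-1'
        {'env': 'staging'},                       # -e '{"env": "staging"}'
    ]
    for data in cli_values:
        merged = dict(extra_vars)
        merged.update(data)
        extra_vars = merged

    # extra_vars == {'env': 'staging', 'region': 'us-east-1'}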
|
||||
|
||||
# FIXME: this should be moved inside the playbook executor code
|
||||
only_tags = self.options.tags.split(",")
|
||||
skip_tags = self.options.skip_tags
|
||||
if self.options.skip_tags is not None:
|
||||
skip_tags = self.options.skip_tags.split(",")
|
||||
|
||||
# initial error check, to make sure all specified playbooks are accessible
|
||||
# before we start running anything through the playbook executor
|
||||
for playbook in self.args:
|
||||
if not os.path.exists(playbook):
|
||||
raise AnsibleError("the playbook: %s could not be found" % playbook)
|
||||
if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
|
||||
raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
|
||||
|
||||
# create the variable manager, which will be shared throughout
|
||||
# the code, ensuring a consistent view of global variables
|
||||
variable_manager = VariableManager()
|
||||
variable_manager.set_extra_vars(extra_vars)
|
||||
|
||||
# create the inventory, and filter it based on the subset specified (if any)
|
||||
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
|
||||
variable_manager.set_inventory(inventory)
|
||||
|
||||
# (which is not returned in list_hosts()) is taken into account for
|
||||
# warning if inventory is empty. But it can't be taken into account for
|
||||
# checking if limit doesn't match any hosts. Instead we don't worry about
|
||||
# limit if only implicit localhost was in inventory to start with.
|
||||
#
|
||||
# Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
|
||||
no_hosts = False
|
||||
if len(inventory.list_hosts()) == 0:
|
||||
# Empty inventory
|
||||
self.display.warning("provided hosts list is empty, only localhost is available")
|
||||
no_hosts = True
|
||||
inventory.subset(self.options.subset)
|
||||
if len(inventory.list_hosts()) == 0 and no_hosts is False:
|
||||
# Invalid limit
|
||||
raise AnsibleError("Specified --limit does not match any hosts")
|
||||
|
||||
# create the playbook executor, which manages running the plays via a task queue manager
|
||||
pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=self.display, options=self.options, passwords=passwords)
|
||||
|
||||
results = pbex.run()
|
||||
|
||||
if isinstance(results, list):
|
||||
for p in results:
|
||||
|
||||
self.display.display('\nplaybook: %s\n' % p['playbook'])
|
||||
for play in p['plays']:
|
||||
if self.options.listhosts:
|
||||
self.display.display("\n %s (%s): host count=%d" % (play['name'], play['pattern'], len(play['hosts'])))
|
||||
for host in play['hosts']:
|
||||
self.display.display(" %s" % host)
|
||||
if self.options.listtasks: #TODO: do we want to display block info?
|
||||
self.display.display("\n %s" % (play['name']))
|
||||
for task in play['tasks']:
|
||||
self.display.display(" %s" % task)
|
||||
if self.options.listtags: #TODO: fix once we figure out block handling above
|
||||
self.display.display("\n %s: tags count=%d" % (play['name'], len(play['tags'])))
|
||||
for tag in play['tags']:
|
||||
self.display.display(" %s" % tag)
|
||||
return 0
|
||||
else:
|
||||
# Arguments as Key-value
|
||||
data = parse_kv(extra_vars_opt)
|
||||
extra_vars = combine_vars(extra_vars, data)
|
||||
return results
|
||||
|
||||
# FIXME: this should be moved inside the playbook executor code
|
||||
only_tags = options.tags.split(",")
|
||||
skip_tags = options.skip_tags
|
||||
if options.skip_tags is not None:
|
||||
skip_tags = options.skip_tags.split(",")
|
||||
|
||||
# initial error check, to make sure all specified playbooks are accessible
|
||||
# before we start running anything through the playbook executor
|
||||
for playbook in args:
|
||||
if not os.path.exists(playbook):
|
||||
raise AnsibleError("the playbook: %s could not be found" % playbook)
|
||||
if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
|
||||
raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
|
||||
|
||||
# create the variable manager, which will be shared throughout
|
||||
# the code, ensuring a consistent view of global variables
|
||||
variable_manager = VariableManager()
|
||||
variable_manager.set_extra_vars(extra_vars)
|
||||
|
||||
# create the inventory, and filter it based on the subset specified (if any)
|
||||
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=options.inventory)
|
||||
variable_manager.set_inventory(inventory)
|
||||
|
||||
# (which is not returned in list_hosts()) is taken into account for
|
||||
# warning if inventory is empty. But it can't be taken into account for
|
||||
# checking if limit doesn't match any hosts. Instead we don't worry about
|
||||
# limit if only implicit localhost was in inventory to start with.
|
||||
#
|
||||
# Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
|
||||
no_hosts = False
|
||||
if len(inventory.list_hosts()) == 0:
|
||||
# Empty inventory
|
||||
display.warning("provided hosts list is empty, only localhost is available")
|
||||
no_hosts = True
|
||||
inventory.subset(options.subset)
|
||||
if len(inventory.list_hosts()) == 0 and no_hosts is False:
|
||||
# Invalid limit
|
||||
raise errors.AnsibleError("Specified --limit does not match any hosts")
|
||||
|
||||
# create the playbook executor, which manages running the plays via a task queue manager
|
||||
pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=passwords)
|
||||
|
||||
results = pbex.run()
|
||||
|
||||
if isinstance(results, list):
|
||||
for p in results:
|
||||
|
||||
display.display('\nplaybook: %s\n' % p['playbook'])
|
||||
for play in p['plays']:
|
||||
if options.listhosts:
|
||||
display.display("\n %s (%s): host count=%d" % (play['name'], play['pattern'], len(play['hosts'])))
|
||||
for host in play['hosts']:
|
||||
display.display(" %s" % host)
|
||||
if options.listtasks: #TODO: do we want to display block info?
|
||||
display.display("\n %s" % (play['name']))
|
||||
for task in play['tasks']:
|
||||
display.display(" %s" % task)
|
||||
if options.listtags: #TODO: fix once we figure out block handling above
|
||||
display.display("\n %s: tags count=%d" % (play['name'], len(play['tags'])))
|
||||
for tag in play['tags']:
|
||||
display.display(" %s" % tag)
|
||||
return 0
|
||||
else:
|
||||
return results
|
||||
########################################################
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
display = Display()
|
||||
#display.display(" ".join(sys.argv), log_only=True)
|
||||
|
||||
try:
|
||||
sys.exit(main(display, sys.argv[1:]))
|
||||
cli = PlaybookCLI(sys.argv, display=display)
|
||||
cli.parse()
|
||||
sys.exit(cli.run())
|
||||
except AnsibleOptionsError as e:
|
||||
cli.parser.print_help()
|
||||
display.display(str(e), stderr=True, color='red')
|
||||
sys.exit(1)
|
||||
except AnsibleError as e:
|
||||
display.display("[ERROR]: %s" % e, color='red', stderr=True)
|
||||
sys.exit(1)
|
||||
display.display(str(e), stderr=True, color='red')
|
||||
sys.exit(2)
|
||||
except KeyboardInterrupt:
|
||||
display.display("[ERROR]: interrupted", color='red', stderr=True)
|
||||
sys.exit(1)
|
||||
display.error("interrupted")
|
||||
sys.exit(4)
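For reference, when one of the --list-* flags is set, the results value iterated in run() above is a list shaped roughly like the following (field names taken from the loop; sample values are invented):

    results = [
        {
            'playbook': 'site.yml',
            'plays': [
                {
                    'name': 'web servers',
                    'pattern': 'webservers',
                    'hosts': ['web1.example.com', 'web2.example.com'],
                    'tasks': ['install nginx', 'start nginx'],
                    'tags': ['setup', 'deploy'],
                },
            ],
        },
    ]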
|
||||
|
||||
|
|
|
@@ -33,143 +33,102 @@ import os
|
|||
import sys
|
||||
import traceback
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.errors import AnsibleError, AnsibleOptionsError
|
||||
from ansible.parsing.vault import VaultEditor
|
||||
from ansible.utils.cli import base_parser, ask_vault_passwords
|
||||
from ansible.utils.cli import CLI
|
||||
from ansible.utils.display import Display
|
||||
|
||||
#-------------------------------------------------------------------------------------
|
||||
# Utility functions for parsing actions/options
|
||||
#-------------------------------------------------------------------------------------
|
||||
|
||||
|
||||
|
||||
class Cli(object):
|
||||
class VaultCli(CLI):
|
||||
""" Vault command line class """
|
||||
|
||||
VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")
|
||||
CIPHER = 'AES256'
|
||||
|
||||
|
||||
def __init__(self, display=None):
|
||||
def __init__(self, args, display=None):
|
||||
|
||||
self.vault_pass = None
|
||||
|
||||
if display is None:
|
||||
self.display = Display()
|
||||
else:
|
||||
self.display = display
|
||||
|
||||
super(VaultCli, self).__init__(args, display)
|
||||
|
||||
def parse(self):
|
||||
|
||||
# create parser for CLI options
|
||||
parser = base_parser(
|
||||
self.parser = CLI.base_parser(
|
||||
usage = "%prog vaultfile.yml",
|
||||
)
|
||||
|
||||
return parser.parse_args()
|
||||
self.set_action()
|
||||
|
||||
def run(self, options, args):
|
||||
|
||||
action = self.get_action(args)
|
||||
|
||||
if not action:
|
||||
parser.print_help()
|
||||
raise AnsibleError("missing required action")
|
||||
|
||||
# options specific to actions
|
||||
if action == "create":
|
||||
parser.set_usage("usage: %prog create [options] file_name")
|
||||
elif action == "decrypt":
|
||||
parser.set_usage("usage: %prog decrypt [options] file_name")
|
||||
elif action == "edit":
|
||||
parser.set_usage("usage: %prog edit [options] file_name")
|
||||
elif action == "view":
|
||||
parser.set_usage("usage: %prog view [options] file_name")
|
||||
elif action == "encrypt":
|
||||
parser.set_usage("usage: %prog encrypt [options] file_name")
|
||||
# options specific to actions
|
||||
if self.action == "create":
|
||||
self.parser.set_usage("usage: %prog create [options] file_name")
|
||||
elif self.action == "decrypt":
|
||||
self.parser.set_usage("usage: %prog decrypt [options] file_name")
|
||||
elif self.action == "edit":
|
||||
self.parser.set_usage("usage: %prog edit [options] file_name")
|
||||
elif self.action == "view":
|
||||
self.parser.set_usage("usage: %prog view [options] file_name")
|
||||
elif self.action == "encrypt":
|
||||
self.parser.set_usage("usage: %prog encrypt [options] file_name")
|
||||
elif action == "rekey":
|
||||
parser.set_usage("usage: %prog rekey [options] file_name")
|
||||
self.parser.set_usage("usage: %prog rekey [options] file_name")
|
||||
|
||||
if len(args) == 0 or len(args) > 1:
|
||||
parser.print_help()
|
||||
self.options, self.args = self.parser.parse_args()
|
||||
|
||||
if len(self.args) == 0 or len(self.args) > 1:
|
||||
self.parser.print_help()
|
||||
raise AnsibleError("Vault requires a single filename as a parameter")
|
||||
|
||||
if options.vault_password_file:
|
||||
def run(self):
|
||||
|
||||
if self.options.vault_password_file:
|
||||
# read vault_pass from a file
|
||||
self.vault_pass = read_vault_file(options.vault_password_file)
|
||||
else:
|
||||
self.vault_pass, _= ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)
|
||||
self.vault_pass = read_vault_file(self.options.vault_password_file)
|
||||
elif self.options.ask_vault_pass:
|
||||
self.vault_pass, _ = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)
|
||||
|
||||
# execute the desired action
|
||||
fn = getattr(self, "execute_%s" % action)
|
||||
fn(args, options)
|
||||
self.execute()
|
||||
|
||||
def get_action(self, args):
|
||||
"""
|
||||
Get the action the user wants to execute from the
|
||||
sys argv list.
|
||||
"""
|
||||
for i in range(0,len(args)):
|
||||
arg = args[i]
|
||||
if arg in VALID_ACTIONS:
|
||||
del args[i]
|
||||
return arg
|
||||
return None
|
||||
def execute_create(self):
|
||||
|
||||
def execute_create(args, options):
|
||||
|
||||
cipher = 'AES256'
|
||||
if hasattr(options, 'cipher'):
|
||||
cipher = options.cipher
|
||||
|
||||
this_editor = VaultEditor(cipher, self.vault_pass, args[0])
|
||||
cipher = getattr(self.options, 'cipher', self.CIPHER)
|
||||
this_editor = VaultEditor(cipher, self.vault_pass, self.args[0])
|
||||
this_editor.create_file()
|
||||
|
||||
def execute_decrypt(args, options):
|
||||
def execute_decrypt(self):
|
||||
|
||||
cipher = 'AES256'
|
||||
if hasattr(options, 'cipher'):
|
||||
cipher = options.cipher
|
||||
|
||||
for f in args:
|
||||
cipher = getattr(self.options, 'cipher', self.CIPHER)
|
||||
for f in self.args:
|
||||
this_editor = VaultEditor(cipher, self.vault_pass, f)
|
||||
this_editor.decrypt_file()
|
||||
|
||||
self.display.display("Decryption successful")
|
||||
|
||||
def execute_edit(args, options):
|
||||
def execute_edit(self):
|
||||
|
||||
cipher = None
|
||||
|
||||
for f in args:
|
||||
this_editor = VaultEditor(cipher, self.vault_pass, f)
|
||||
for f in self.args:
|
||||
this_editor = VaultEditor(None, self.vault_pass, f)
|
||||
this_editor.edit_file()
|
||||
|
||||
def execute_view(args, options):
|
||||
def execute_view(self):
|
||||
|
||||
cipher = None
|
||||
|
||||
for f in args:
|
||||
this_editor = VaultEditor(cipher, self.vault_pass, f)
|
||||
for f in self.args:
|
||||
this_editor = VaultEditor(None, self.vault_pass, f)
|
||||
this_editor.view_file()
|
||||
|
||||
def execute_encrypt(args, options):
|
||||
def execute_encrypt(self):
|
||||
|
||||
cipher = 'AES256'
|
||||
if hasattr(options, 'cipher'):
|
||||
cipher = options.cipher
|
||||
|
||||
for f in args:
|
||||
cipher = getattr(self.options, 'cipher', self.CIPHER)
|
||||
for f in self.args:
|
||||
this_editor = VaultEditor(cipher, self.vault_pass, f)
|
||||
this_editor.encrypt_file()
|
||||
|
||||
self.display.display("Encryption successful")
|
||||
|
||||
def execute_rekey(args, options ):
|
||||
__, new_password = ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)
|
||||
def execute_rekey(self):
|
||||
__, new_password = self.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)
|
||||
|
||||
cipher = None
|
||||
for f in args:
|
||||
this_editor = VaultEditor(cipher, self.vault_pass, f)
|
||||
for f in self.args:
|
||||
this_editor = VaultEditor(None, self.vault_pass, f)
|
||||
this_editor.rekey_file(new_password)
|
||||
|
||||
self.display.display("Rekey successful")
|
||||
|
@@ -179,15 +138,17 @@ class Cli(object):
|
|||
if __name__ == "__main__":
|
||||
|
||||
display = Display()
|
||||
#display.display(" ".join(sys.argv), log_only=True)
|
||||
|
||||
try:
|
||||
cli = Cli(display=display)
|
||||
(options, args) = cli.parse()
|
||||
sys.exit(cli.run(options, args))
|
||||
cli = VaultCli(sys.argv, display=display)
|
||||
cli.parse()
|
||||
sys.exit(cli.run())
|
||||
except AnsibleOptionsError as e:
|
||||
cli.parser.print_help()
|
||||
display.display(str(e), stderr=True, color='red')
|
||||
sys.exit(1)
|
||||
except AnsibleError as e:
|
||||
display.display("[ERROR]: %s" % e, color='red', stderr=True)
|
||||
sys.exit(1)
|
||||
display.display(str(e), stderr=True, color='red')
|
||||
sys.exit(2)
|
||||
except KeyboardInterrupt:
|
||||
display.display("[ERROR]: interrupted", color='red', stderr=True)
|
||||
sys.exit(1)
|
||||
display.error("interrupted")
|
||||
sys.exit(4)
|
||||
|
|