Merge remote-tracking branch 'upstream/devel' into devel
Conflicts: library/hg
Commit 2f93742dd1
12 changed files with 440 additions and 241 deletions
apt (6 changed lines)

@@ -70,7 +70,7 @@ options:
author: Matthew Williams
notes: []
examples:
    - code: "apt: pkg=foo update-cache=yes"
    - code: "apt: pkg=foo update_cache=yes"
      description: Update repositories cache and install C(foo) package
    - code: "apt: pkg=foo state=removed"
      description: Remove C(foo) package

@@ -78,9 +78,9 @@ examples:
      description: Install the package C(foo)
    - code: "apt: pkg=foo=1.00 state=installed"
      description: Install the version '1.00' of package C(foo)
    - code: "apt: pkg=nginx state=latest default-release=squeeze-backports update-cache=yes"
    - code: "apt: pkg=nginx state=latest default_release=squeeze-backports update_cache=yes"
      description: Update the repository cache and update package C(ngnix) to latest version using default release C(squeeze-backport)
    - code: "apt: pkg=openjdk-6-jdk state=latest install-recommends=no"
    - code: "apt: pkg=openjdk-6-jdk state=latest install_recommends=no"
      description: Install latest version of C(openjdk-6-jdk) ignoring C(install-reccomends)
'''
apt_key (223 changed lines)

@@ -22,7 +22,7 @@
DOCUMENTATION = '''
---
module: apt_key
author: Jayson Vantuyl
author: Jayson Vantuyl & others
version_added: 1.0
short_description: Add or remove an apt key
description:

@@ -59,195 +59,116 @@ examples:
      description: Remove a Apt specific signing key
'''

# FIXME: standardize into module_common
from urllib2 import urlopen, URLError
from traceback import format_exc
from subprocess import Popen, PIPE, call
from re import compile as re_compile
# FIXME: standardize into module_common
from distutils.spawn import find_executable
from os import environ
from sys import exc_info
import traceback

match_key = re_compile("^gpg:.*key ([0-9a-fA-F]+):.*$")

REQUIRED_EXECUTABLES=['gpg', 'grep', 'apt-key']


def find_missing_binaries():
    return [missing for missing in REQUIRED_EXECUTABLES if not find_executable(missing)]
def check_missing_binaries(module):
    missing = [e for e in REQUIRED_EXECUTABLES if not find_executable(e)]
    if len(missing):
        module.fail_json(msg="binaries are missing", names=all)

def all_keys(module):
    (rc, out, err) = module.run_command("apt-key list")
    results = []
    lines = out.split('\n')
    for line in lines:
        if line.startswith("pub"):
            tokens = line.split()
            code = tokens[1]
            (len_type, real_code) = code.split("/")
            results.append(real_code)
    return results

def get_key_ids(key_data):
    p = Popen("gpg --list-only --import -", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    (stdo, stde) = p.communicate(key_data)
def key_present(module, key_id):
    (rc, out, err) = module.run_command("apt-key list | 2>&1 grep -q %s" % key_id)
    return rc == 0

    if p.returncode > 0:
        raise Exception("error running GPG to retrieve keys")

    output = stdo + stde

    for line in output.split('\n'):
        match = match_key.match(line)
        if match:
            yield match.group(1)


def key_present(key_id):
    return call("apt-key list | 2>&1 grep -q %s" % key_id, shell=True) == 0


def download_key(url):
def download_key(module, url):
    # FIXME: move get_url code to common, allow for in-memory D/L, support proxies
    # and reuse here
    if url is None:
        raise Exception("Needed URL but none specified")
    connection = urlopen(url)
    if connection is None:
        raise Exception("error connecting to download key from %r" % url)
    return connection.read()
        module.fail_json(msg="needed a URL but was not specified")
    try:
        connection = urlopen(url)
        if connection is None:
            module.fail_json("error connecting to download key from url")
        data = connection.read()
        return data
    except:
        module.fail_json(msg="error getting key id from url", traceback=format_exc())


def add_key(key):
    return call("apt-key add -", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    (_, _) = p.communicate(key)

    return p.returncode == 0

def add_key(module, key):
    cmd = "apt-key add -"
    (rc, out, err) = module.run_command(cmd, data=key, check_rc=True)
    return True

def remove_key(key_id):
    return call('apt-key del %s' % key_id, shell=True) == 0


def return_values(tb=False):
    if tb:
        return {'exception': format_exc()}
    else:
        return {}


# use cues from the environment to mock out functions for testing
if 'ANSIBLE_TEST_APT_KEY' in environ:
    orig_download_key = download_key
    KEY_ADDED=0
    KEY_REMOVED=0
    KEY_DOWNLOADED=0


    def download_key(url):
        global KEY_DOWNLOADED
        KEY_DOWNLOADED += 1
        return orig_download_key(url)


    def find_missing_binaries():
        return []


    def add_key(key):
        global KEY_ADDED
        KEY_ADDED += 1
        return True


    def remove_key(key_id):
        global KEY_REMOVED
        KEY_REMOVED += 1
        return True


    def return_values(tb=False):
        extra = dict(
            added=KEY_ADDED,
            removed=KEY_REMOVED,
            downloaded=KEY_DOWNLOADED
        )
        if tb:
            extra['exception'] = format_exc()
        return extra


    if environ.get('ANSIBLE_TEST_APT_KEY') == 'none':
        def key_present(key_id):
            return False
    else:
        def key_present(key_id):
            return key_id == environ['ANSIBLE_TEST_APT_KEY']

    # FIXME: use module.run_command, fail at point of error and don't discard useful stdin/stdout
    cmd = 'apt-key del %s'
    (rc, out, err) = module.run_command(cmd, check_rc=True)
    return True

def main():
    module = AnsibleModule(
        argument_spec=dict(
            id=dict(required=False, default=None),
            url=dict(required=False),
            data=dict(required=False),
            key=dict(required=False),
            state=dict(required=False, choices=['present', 'absent'], default='present')
        )
        ),
    )

    expected_key_id = module.params['id']
    url = module.params['url']
    state = module.params['state']
    changed = False
    key_id = module.params['id']
    url = module.params['url']
    data = module.params['data']
    state = module.params['state']
    changed = False

    # FIXME: I think we have a common facility for this, if not, want
    check_missing_binaries(module)

    missing = find_missing_binaries()

    if missing:
        module.fail_json(msg="can't find needed binaries to run", missing=missing,
                         **return_values())
    keys = all_keys(module)

    if state == 'present':
        if expected_key_id and key_present(expected_key_id):
            # key is present, nothing to do
            pass
        if key_id and key_id in keys:
            module.exit_json(changed=False)
        else:
            # download key
            try:
                key = download_key(url)
                (key_id,) = tuple(get_key_ids(key)) # TODO: support multiple key ids?
            except Exception:
                module.fail_json(
                    msg="error getting key id from url",
                    **return_values(True)
                )

            # sanity check downloaded key
            if expected_key_id and key_id != expected_key_id:
                module.fail_json(
                    msg="expected key id %s, got key id %s" % (expected_key_id, key_id),
                    **return_values()
                )

            # actually add key
            if key_present(key_id):
                changed=False
            elif add_key(key):
                changed=True
            if not data:
                data = download_key(module, url)
            if key_id and key_id in keys:
                module.exit_json(changed=False)
            else:
                module.fail_json(
                    msg="failed to add key id %s" % key_id,
                    **return_values()
                )
                add_key(module, data)
                changed=False
                keys2 = all_keys(module)
                if len(keys) != len(keys2):
                    changed=True
                if key_id and not key_id in keys2:
                    module.fail_json(msg="key does not seem to have been added", id=key_id)
                module.exit_json(changed=changed)
    elif state == 'absent':
        # optionally download the key and get the id
        if not expected_key_id:
            try:
                key = download_key(url)
                (key_id,) = tuple(get_key_ids(key)) # TODO: support multiple key ids?
            except Exception:
                module.fail_json(
                    msg="error getting key id from url",
                    **return_values(True)
                )
        else:
            key_id = expected_key_id

        # actually remove key
        if key_present(key_id):
        if not key_id:
            module.fail_json(msg="key is required")
        if key_id in keys:
            if remove_key(key_id):
                changed=True
            else:
                # FIXME: module.fail_json or exit-json immediately at point of failure
                module.fail_json(msg="error removing key_id", **return_values(True))
    else:
        module.fail_json(
            msg="unexpected state: %s" % state,
            **return_values()
        )

    module.exit_json(changed=changed, **return_values())
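Aside (not part of the commit): the apt_key rewrite above replaces direct Popen/call usage with module.run_command, which hands back a (rc, stdout, stderr) tuple and can pipe data to stdin. A minimal standalone sketch of that contract using plain subprocess; the helper name is illustrative and it assumes apt-key is installed on the machine running it:

import subprocess

def run_command(args, data=None):
    # args is an argv list such as ['apt-key', 'list']; data, when given, is piped to stdin
    p = subprocess.Popen(args, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate(data)
    return (p.returncode, out, err)

rc, out, err = run_command(['apt-key', 'list'])
if rc != 0:
    raise SystemExit(err)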
assemble (10 changed lines)

@@ -91,12 +91,12 @@ def main():

    module = AnsibleModule(
        # not checking because of daisy chain to file module
        check_invalid_arguments = False,
        argument_spec = dict(
            src = dict(required=True),
            dest = dict(required=True),
            backup=dict(default=False, choices=BOOLEANS),
        )
        ),
        add_file_common_args=True
    )

    changed=False

@@ -124,11 +124,11 @@ def main():
        shutil.copy(path, dest)
        changed = True

    file_args = module.load_file_common_arguments(module.params)
    changed = module.set_file_attributes_if_different(file_args, changed)
    # Mission complete
    module.exit_json(src=src, dest=dest, md5sum=destmd5,
                     changed=changed, msg="OK",
                     daisychain="file", daisychain_args=module.params)
                     changed=changed, msg="OK")

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
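Aside (not part of the commit): assemble (and ini_file below) drop the daisy-chain to the file module in favour of add_file_common_args plus the two helpers visible in the diff. A hedged sketch of that pattern, assuming it runs inside an Ansible module body where AnsibleModule is supplied by the <<INCLUDE_ANSIBLE_MODULE_COMMON>> snippet; the dest argument is illustrative only:

module = AnsibleModule(
    argument_spec = dict(
        dest = dict(required=True),
    ),
    add_file_common_args=True,   # accept owner/group/mode/etc. directly
)
changed = False
# ... the module does its own work here and updates `changed` ...
file_args = module.load_file_common_arguments(module.params)
changed = module.set_file_attributes_if_different(file_args, changed)
module.exit_json(dest=module.params['dest'], changed=changed, msg="OK")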
ec2 (25 changed lines)

@@ -66,7 +66,7 @@ options:
    aliases: []
  ec2_url:
    description:
      - url to use to connect to ec2 or your cloud (for example U(https://ec2.amazonaws.com) when using Amazon ec2 directly and not Eucalyptus)
      - url to use to connect to ec2 or your Eucalyptus cloud (for example (https://ec2.amazonaws.com) when using Amazon ec2 directly and not Eucalyptus)
    required: False
    default: null
    aliases: []

@@ -82,6 +82,12 @@ options:
    required: False
    default: null
    aliases: []
  count:
    description:
      - number of instances to launch
    required: False
    default: 1
    aliases: []
  user_data:
    version_added: "0.9"
    description:

@@ -90,10 +96,10 @@ options:
    default: null
    aliases: []
examples:
  - code: "local_action: ec2 keypair=admin instance_type=m1.large image=emi-40603AD1 wait=true group=webserver"
  - code: "local_action: ec2 keypair=admin instance_type=m1.large image=emi-40603AD1 wait=true group=webserver count=3"
    description: "Examples from Ansible Playbooks"
requirements: [ "boto" ]
author: Seth Vidal, Tim Gerla
author: Seth Vidal, Tim Gerla, Lester Wade
'''

import sys

@@ -113,7 +119,7 @@ def main():
            instance_type = dict(aliases=['type']),
            image = dict(required=True),
            kernel = dict(),
            #count = dict(default='1'), # maybe someday
            count = dict(default='1'),
            ramdisk = dict(),
            wait = dict(choices=BOOLEANS, default=False),
            ec2_url = dict(aliases=['EC2_URL']),

@@ -127,7 +133,7 @@ def main():
    group = module.params.get('group')
    instance_type = module.params.get('instance_type')
    image = module.params.get('image')
    #count = module.params.get('count')
    count = module.params.get('count')
    kernel = module.params.get('kernel')
    ramdisk = module.params.get('ramdisk')
    wait = module.params.get('wait')

@@ -148,10 +154,12 @@ def main():
        ec2 = boto.connect_ec2_endpoint(ec2_url, ec2_access_key, ec2_secret_key)
    else: # otherwise it's Amazon.
        ec2 = boto.connect_ec2(ec2_access_key, ec2_secret_key)

    # Both min_count and max_count equal count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want.

    try:
        res = ec2.run_instances(image, key_name = key_name,
                                min_count = 1, max_count = 1,
                                min_count = count, max_count = count,
                                security_groups = [group],
                                instance_type = instance_type,
                                kernel_id = kernel,

@@ -171,9 +179,8 @@ def main():
            res_list = res.connection.get_all_instances(instids)
            this_res = res_list[0]
            num_running = len([ i for i in this_res.instances if i.state=='running' ])
            time.sleep(2)
            time.sleep(5)

    # there's only one - but maybe one day there could be more
    instances = []
    for inst in this_res.instances:
        d = {
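Aside (not part of the commit): the new count parameter is passed as both min_count and max_count, so boto either launches exactly that many instances or the request fails. A minimal boto sketch under that assumption; the credentials, AMI id, key pair and security group are placeholders, not real values:

import boto

ec2 = boto.connect_ec2('<access_key>', '<secret_key>')   # placeholder credentials
count = 3
reservation = ec2.run_instances('ami-00000000',           # placeholder image id
                                key_name='admin',
                                min_count=count, max_count=count,  # exactly `count` instances or fail
                                security_groups=['webserver'],
                                instance_type='m1.large')
print [i.id for i in reservation.instances]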
ec2_facts (new file, 122 lines)

@@ -0,0 +1,122 @@
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION="""
---
module: ec2_facts
short_description: Gathers facts about remote hosts within ec2 (aws)
options: {}
description:
    - This module fetches data from the metadata servers in ec2 (aws).
      Eucalyptus cloud provides a similar service and this module should
      work this cloud provider as well.
notes:
    - Parameters to filter on ec2_facts may be added later.
examples:
    - code: ansible all -m ec2_facts
      description: Obtain facts from ec2 metatdata servers. You will need to run an instance within ec2.
author: "Silviu Dicu <silviudicu@gmail.com>"
"""

import urllib2
import socket
import re

socket.setdefaulttimeout(5)

class Ec2Metadata(object):

    ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/'
    ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key'
    ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/'

    def __init__(self, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None):
        self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri
        self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri
        self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri
        self._data = {}
        self._prefix = 'ansible_ec2_%s'

    def _fetch(self, url):
        try:
            return urllib2.urlopen(url).read()
        except urllib2.HTTPError:
            return
        except urllib2.URLError:
            return

    def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']):
        new_fields = {}
        for key, value in fields.iteritems():
            split_fields = key[len(uri):].split('/')
            if len(split_fields) > 1 and split_fields[1]:
                new_key = "-".join(split_fields)
                new_fields[self._prefix % new_key] = value
            else:
                new_key = "".join(split_fields)
                new_fields[self._prefix % new_key] = value
        for pattern in filter_patterns:
            for key in new_fields.keys():
                match = re.search(pattern, key)
                if match: new_fields.pop(key)
        return new_fields

    def fetch(self, uri, recurse=True):
        raw_subfields = self._fetch(uri)
        if not raw_subfields:
            return
        subfields = raw_subfields.split('\n')
        for field in subfields:
            if field.endswith('/') and recurse:
                self.fetch(uri + field)
            if uri.endswith('/'):
                new_uri = uri + field
            else:
                new_uri = uri + '/' + field
            if new_uri not in self._data and not new_uri.endswith('/'):
                content = self._fetch(new_uri)
                if field == 'security-groups':
                    sg_fields = ",".join(content.split('\n'))
                    self._data['%s' % (new_uri)] = sg_fields
                else:
                    self._data['%s' % (new_uri)] = content

    def run(self):
        self.fetch(self.uri_meta)  # populate _data
        data = self._mangle_fields(self._data,
                                   self.uri_meta)
        data[self._prefix % 'user-data'] = self._fetch(self.uri_user)
        data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh)
        return data


def main():
    ec2_facts = Ec2Metadata().run()
    ec2_facts_result = {
        "changed" : False,
        "ansible_facts" : ec2_facts
    }
    module = AnsibleModule(
        argument_spec = dict()
    )
    module.exit_json(**ec2_facts_result)

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>

main()
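Aside (not part of the commit): ec2_facts walks the link-local metadata service shown above. A quick standalone probe of that endpoint, which only works from inside an EC2 (or Eucalyptus-compatible) instance:

import urllib2

base = 'http://169.254.169.254/latest/meta-data/'
print urllib2.urlopen(base).read()                    # top-level listing; names ending in '/' are subtrees the module recurses into
print urllib2.urlopen(base + 'instance-id').read()    # a single leaf value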
fireball (4 changed lines)

@@ -143,7 +143,7 @@ def daemonize_self(module, password, port, minutes):
    os.dup2(dev_null.fileno(), sys.stderr.fileno())
    log("daemonizing successful (%s,%s)" % (password, port))

def command(data):
def command(module, data):
    if 'cmd' not in data:
        return dict(failed=True, msg='internal error: cmd is required')
    if 'tmp_path' not in data:

@@ -220,7 +220,7 @@ def serve(module, password, port, minutes):
        response = {}

        if mode == 'command':
            response = command(data)
            response = command(module, data)
        elif mode == 'put':
            response = put(data)
        elif mode == 'fetch':
get_url (36 changed lines)

@@ -35,7 +35,7 @@ version_added: "0.6"
options:
  url:
    description:
      - HTTP, HTTPS, or FTP URL
      - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
    required: true
    default: null
    aliases: []

@@ -63,18 +63,18 @@ examples:
  - code: "get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf mode=0440"
    description: "Example from Ansible Playbooks"
notes:
    - This module doesn't yet support configuration for proxies or passwords.
    - This module doesn't yet support configuration for proxies.
# informational: requirements for nodes
requirements: [ urllib2, urlparse ]
author: Jan-Piet Mens
'''

HAS_URLLIB2=True
HAS_URLLIB2 = True
try:
    import urllib2
except ImportError:
    HAS_URLLIB2=False
HAS_URLPARSE=True
    HAS_URLLIB2 = False
HAS_URLPARSE = True

try:
    import urlparse

@@ -100,6 +100,29 @@ def url_do_get(module, url, dest):
    USERAGENT = 'ansible-httpget'
    info = dict(url=url, dest=dest)
    r = None
    parsed = urlparse.urlparse(url)
    if '@' in parsed.netloc:
        credentials = parsed.netloc.split('@')[0]
        if ':' in credentials:
            username, password = credentials.split(':')
        netloc = parsed.netloc.split('@')[1]
        parsed = list(parsed)
        parsed[1] = netloc

        passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
        # this creates a password manager
        passman.add_password(None, netloc, username, password)
        # because we have put None at the start it will always
        # use this username/password combination for urls
        # for which `theurl` is a super-url

        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        # create the AuthHandler

        opener = urllib2.build_opener(authhandler)
        urllib2.install_opener(opener)
        #reconstruct url without credentials
        url = urlparse.urlunparse(parsed)

    request = urllib2.Request(url)
    request.add_header('User-agent', USERAGENT)

@@ -232,8 +255,7 @@ def main():

    # Mission complete
    module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum_src,
                     changed=changed, msg=info.get('msg',''),
                     daisychain="file", daisychain_args=info.get('daisychain_args',''))
                     changed=changed, msg=info.get('msg', ''))

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
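Aside (not part of the commit): the get_url change above accepts credentials embedded in the URL and strips them before making the request. A standalone sketch of that split using urlparse; the URL is a made-up example:

import urlparse

url = 'http://bob:secret@example.com:8080/path/file.conf'   # made-up example URL
parsed = urlparse.urlparse(url)
credentials, netloc = parsed.netloc.split('@')
username, password = credentials.split(':')
parts = list(parsed)
parts[1] = netloc                                # rebuild the URL without the credentials
print urlparse.urlunparse(parts)                 # http://example.com:8080/path/file.conf
print username, password                         # bob secret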
hg (66 changed lines)

@@ -122,32 +122,30 @@ def _undo_hgrc(hgrc, vals):
    parser.write(f)
    f.close()

def _hg_command(args_list):
    cmd = ['hg'] + args_list
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    return out, err, p.returncode
def _hg_command(module, args_list):
    (rc, out, err) = module.run_command(['hg'] + args_list)
    return (out, err, rc)

def _hg_discard(dest):
    out, err, code = _hg_command(['up', '-C', '-R', dest])
def _hg_discard(module, dest):
    out, err, code = _hg_command(module, ['up', '-C', '-R', dest])
    if code != 0:
        raise HgError(err)

def _hg_purge(dest):
def _hg_purge(module, dest):
    hgrc = os.path.join(dest, '.hg/hgrc')
    purge_option = [('extensions', 'purge', '')]
    _set_hgrc(hgrc, purge_option)
    out, err, code = _hg_command(['purge', '-R', dest])
    out, err, code = _hg_command(module, ['purge', '-R', dest])
    if code == 0:
        _undo_hgrc(hgrc, purge_option)
    else:
        raise HgError(err)

def _hg_verify(dest):
def _hg_verify(module, dest):
    error1 = "hg verify failed."
    error2 = "{dest} is not a repository.".format(dest=dest)

    out, err, code = _hg_command(['verify', '-R', dest])
    out, err, code = _hg_command(module, ['verify', '-R', dest])
    if code == 1:
        raise HgError(error1, stderr=err)
    elif code == 255:

@@ -155,7 +153,7 @@ def _hg_verify(dest):
    elif code == 0:
        return True

def _post_op_hg_revision_check(dest, revision):
def _post_op_hg_revision_check(module, dest, revision):
    """
    Verify the tip is the same as `revision`.

@@ -170,13 +168,13 @@ def _post_op_hg_revision_check(dest, revision):
    err2 = "tip is different from %s. See below for extended summary." % revision

    if revision == 'default':
        out, err, code = _hg_command(['pull', '-R', dest])
        out, err, code = _hg_command(module, ['pull', '-R', dest])
        if "no changes found" in out:
            return True
        else:
            raise HgError(err2, stderr=out)
    else:
        out, err, code = _hg_command(['tip', '-R', dest])
        out, err, code = _hg_command(module, ['tip', '-R', dest])
        if revision in out: # revision should be part of the output (changeset: $revision ...)
            return True
        else:

@@ -185,45 +183,45 @@ def _post_op_hg_revision_check(dest, revision):
    else: # hg tip is fine, but tip != revision
        raise HgError(err2, stderr=out)

def force_and_clean(dest):
    _hg_discard(dest)
    _hg_purge(dest)
def force_and_clean(module, dest):
    _hg_discard(module, dest)
    _hg_purge(module, dest)

def pull_and_update(repo, dest, revision, force):
def pull_and_update(module, repo, dest, revision, force):
    if force == 'yes':
        force_and_clean(dest)
        force_and_clean(module, dest)

    if _hg_verify(dest):
    if _hg_verify(module, dest):
        cmd1 = ['pull', '-R', dest, '-r', revision]
        out, err, code = _hg_command(cmd1)
        out, err, code = _hg_command(module, cmd1)

        if code == 1:
            raise HgError("Unable to perform pull on %s" % dest, stderr=err)
        elif code == 0:
            cmd2 = ['update', '-R', dest, '-r', revision]
            out, err, code = _hg_command(cmd2)
            out, err, code = _hg_command(module, cmd2)t
            if code == 1:
                raise HgError("There are unresolved files in %s" % dest, stderr=err)
            elif code == 0:
                # so far pull and update seems to be working, check revision and $revision are equal
                _post_op_hg_revision_check(dest, revision)
                _post_op_hg_revision_check(module, dest, revision)
                return True
        # when code aren't 1 or 0 in either command
        raise HgError("", stderr=err)

def clone(repo, dest, revision, force):
def clone(module, repo, dest, revision, force):
    if os.path.exists(dest):
        if _hg_verify(dest): # make sure it's a real repo
            if _post_op_hg_revision_check(dest, revision): # make sure revision and $revision are equal
        if _hg_verify(module, dest): # make sure it's a real repo
            if _post_op_hg_revision_check(module, dest, revision): # make sure revision and $revision are equal
                if force == 'yes':
                    force_and_clean(dest)
                    force_and_clean(module, dest)
                return False

    cmd = ['clone', repo, dest, '-r', revision]
    out, err, code = _hg_command(cmd)
    out, err, code = _hg_command(module, cmd)
    if code == 0:
        _hg_verify(dest)
        _post_op_hg_revision_check(dest, revision)
        _hg_verify(module, dest)
        _post_op_hg_revision_check(module, dest, revision)
        return True
    else:
        raise HgError(err, stderr='')

@@ -250,15 +248,11 @@ def main():
            shutil.rmtree(dest)
            changed = True
        elif state == 'present':
            changed = clone(repo, dest, revision, force)
            changed = clone(module, repo, dest, revision, force)
        elif state == 'latest':
            changed = pull_and_update(repo, dest, revision, force)
            changed = pull_and_update(module, repo, dest, revision, force)

        module.exit_json(dest=dest, changed=changed)
    #except HgError as e:
    #    module.fail_json(msg=str(e), params=module.params)
    #except IOError as e:
    #    module.fail_json(msg=str(e), params=module.params)
    except Exception as e:
        module.fail_json(msg=str(e), params=module.params)
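Aside (not part of the commit): _hg_command now wraps module.run_command but reorders its (rc, out, err) result to (out, err, rc), so the existing call sites keep their out, err, code unpacking. A small illustration with a stand-in runner; the names here are illustrative, not the module's API:

def fake_run_command(args):
    # stand-in for module.run_command; returns (rc, stdout, stderr)
    return (0, 'changeset:   42:abc123\n', '')

def _hg_command(run_command, args_list):
    (rc, out, err) = run_command(['hg'] + args_list)
    return (out, err, rc)

out, err, code = _hg_command(fake_run_command, ['tip', '-R', '/srv/repo'])
assert code == 0 and 'changeset' in out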
ini_file (14 changed lines)

@@ -153,8 +153,6 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese
def main():

    module = AnsibleModule(
        # not checking because of daisy chain to file module
        check_invalid_arguments = False,
        argument_spec = dict(
            dest = dict(required=True),
            section = dict(required=True),

@@ -162,7 +160,8 @@ def main():
            value = dict(required=False),
            backup = dict(default='no', choices=BOOLEANS),
            state = dict(default='present', choices=['present', 'absent'])
        )
        ),
        add_file_common_args = True
    )

    info = dict()

@@ -176,14 +175,11 @@ def main():

    changed = do_ini(module, dest, section, option, value, state, backup)

    info['daisychain_args'] = module.params
    info['daisychain_args']['state'] = 'file'
    info['daisychain_args']['dest'] = dest
    file_args = module.load_file_common_arguments(module.params)
    changed = module.set_file_attributes_if_different(file_args, changed)

    # Mission complete
    module.exit_json(dest=dest,
                     changed=changed, msg="OK",
                     daisychain="file", daisychain_args=info.get('daisychain_args',''))
    module.exit_json(dest=dest, changed=changed, msg="OK")

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
pkgin (new executable file, 140 lines)

@@ -0,0 +1,140 @@
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-

# (c) 2013, Shaun Zinck
# Written by Shaun Zinck <shaun.zinck at gmail.com>
# Based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.


DOCUMENTATION = '''
---
module: pkgin
short_description: Package manager for SmartOS
description:
    - Manages SmartOS packages
version_added: "1.0"
options:
    name:
        description:
            - name of package to install/remove
        required: true
    state:
        description:
            - state of the package
        choices: [ 'present', 'absent' ]
        required: false
        default: present
author: Shaun Zinck
notes: []
examples:
    - code: "pkgin: name=foo state=present"
      description: install package foo"
    - code: "pkgin: name=foo state=absent"
      description: remove package foo
    - code: "pkgin: name=foo,bar state=absent"
      description: remove packages foo and bar
'''


import json
import shlex
import os
import sys

PKGIN_PATH = "/opt/local/bin/pkgin"

def query_package(module, name, state="present"):

    if state == "present":

        rc, out, err = module.run_command("%s list | grep ^%s" % (PKGIN_PATH, name))

        if rc == 0:
            return True

        return False


def remove_packages(module, packages):

    remove_c = 0
    # Using a for loop incase of error, we can report the package that failed
    for package in packages:
        # Query the package first, to see if we even need to remove
        if not query_package(module, package):
            continue

        rc, out, err = module.run_command("%s -y remove %s" % (PKGIN_PATH, package))

        if query_package(module, package):
            module.fail_json(msg="failed to remove %s: %s" % (package, out))

        remove_c += 1

    if remove_c > 0:

        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)

    module.exit_json(changed=False, msg="package(s) already absent")


def install_packages(module, packages):

    install_c = 0

    for package in packages:
        if query_package(module, package):
            continue

        rc, out, err = module.run_command("%s -y install %s" % (PKGIN_PATH, package))

        if not query_package(module, package):
            module.fail_json(msg="failed to install %s: %s" % (package, out))

        install_c += 1

    if install_c > 0:
        module.exit_json(changed=True, msg="present %s package(s)" % (install_c))

    module.exit_json(changed=False, msg="package(s) already present")



def main():
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default="present", choices=["present","absent"]),
            name = dict(aliases=["pkg"], required=True)))


    if not os.path.exists(PKGIN_PATH):
        module.fail_json(msg="cannot find pkgin, looking for %s" % (PKGIN_PATH))

    p = module.params

    pkgs = p["name"].split(",")

    if p["state"] == "present":
        install_packages(module, pkgs)

    elif p["state"] == "absent":
        remove_packages(module, pkgs)

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>

main()
setup (9 changed lines)

@@ -82,10 +82,11 @@ class Facts(object):
    # A list of dicts. If there is a platform with more than one
    # package manager, put the preferred one last. If there is an
    # ansible module, use that as the value for the 'name' key.
    PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
                 { 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
                 { 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
                 { 'path' : '/usr/bin/pacman', 'name' : 'pacman' } ]
    PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
                 { 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
                 { 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
                 { 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
                 { 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' } ]

    def __init__(self):
        self.facts = {}
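Aside (not part of the commit): the setup change only appends pkgin to PKG_MGRS; the selection logic sketched below (last existing path wins, per the "put the preferred one last" comment) is an assumption about how the facts code uses the list, not copied from the module:

import os

PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
             { 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
             { 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
             { 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
             { 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' } ]

pkg_mgr = None
for mgr in PKG_MGRS:
    if os.path.exists(mgr['path']):
        pkg_mgr = mgr['name']   # later entries win, so pkgin is picked on SmartOS
print pkg_mgr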
sysctl (26 changed lines)

@@ -24,47 +24,43 @@ DOCUMENTATION = '''
module: sysctl
short_description: Permit to handle sysctl.conf entries
description:
    - This module handle the entries in C(/etc/sysctl.conf),
      and perform a I(/sbin/sysctl -p) after any change
    - This module manipulates sysctl entries and performs a I(/sbin/sysctl -p) after changing them.
version_added: "0.6"
options:
    name:
        description:
            - |
              also known as "key",
              this is the short path, point separated to the sysctl entry eg: C(vm.swappiness)"
            - this is the short path, decimal seperated, to the sysctl entry
        required: true
        default: null
        aliases: [ 'key' ]
    value:
        description:
            - "value to affect to the sysctl entry, to not provide if state=absent"
            - set the sysctl value to this entry
        required: false
        default: null
        aliases: [ 'val' ]
    state:
        description:
            - state=present the entry is added if not exist, or updated if exist
              state=absent the entry is removed if exist
            - whether the entry should be present or absent
        choices: [ "present", "absent" ]
        default: present
    checks:
        description:
            - C(checks)=I(none) no smart/facultative checks will be made
              C(checks)=I(before) some checks performed before any update (ie. does the sysctl key is writable ?)
              C(checks)=I(after) some checks performed after an update (ie. does kernel give back the setted value ?)
              C(checks)=I(both) all the smart checks I(before and after) are performed
            - if C(checks)=I(none) no smart/facultative checks will be made
            - if C(checks)=I(before) some checks performed before any update (ie. does the sysctl key is writable ?)
            - if C(checks)=I(after) some checks performed after an update (ie. does kernel give back the setted value ?)
            - if C(checks)=I(both) all the smart checks I(before and after) are performed
        choices: [ "none", "before", "after", "both" ]
        default: both
    reload:
        description:
            - C(reload=yes) perform a I(/sbin/sysctl -p) if C(sysctl_file) updated !
              C(reload=no) do not reload I(sysctl) even if C(sysctl_file) updated !
            - if C(reload=yes), performs a I(/sbin/sysctl -p) if the C(sysctl_file) is updated
            - if C(reload=no), does not reload I(sysctl) even if the C(sysctl_file) is updated
        choices: [ yes, no ]
        default: yes
    sysctl_file:
        description:
            - specify the absolute path to C(/etc/sysctl.conf)
            - specifies the absolute path to C(sysctl.conf), if not /etc/sysctl.conf
        required: false
        default: /etc/sysctl.conf
examples: