Merge branch 'devel' of github.com:ansible/ansible-modules-extras into devel
commit 12ef9aaae0
74 changed files with 3244 additions and 88 deletions
README.md | 10
@@ -5,7 +5,15 @@ This repo contains a subset of ansible-modules with slightly lower use or priority
All new modules should be submitted here, and have a chance to be promoted to core over time.

Take care to submit tickets to the appropriate repo where modules are contained. The docs.ansible.com website indicates this at the bottom of each module documentation page.

Reporting bugs
==============

Take care to submit tickets to the appropriate repo where modules are contained. The repo is mentioned at the bottom of the module documentation page at [docs.ansible.com](http://docs.ansible.com/).

Testing modules
===============

The Ansible [module development guide](http://docs.ansible.com/developing_modules.html#testing-modules) contains the latest information on testing modules.

License
=======
@@ -205,6 +205,8 @@ action: ovirt >
|
|||
|
||||
|
||||
'''
|
||||
import sys
|
||||
|
||||
try:
|
||||
from ovirtsdk.api import API
|
||||
from ovirtsdk.xml import params
|
database/misc/__init__.py | 0 (new file)
|
@@ -67,6 +67,11 @@ options:
|
|||
- The password to use for the user
|
||||
required: false
|
||||
default: null
|
||||
ssl:
|
||||
version_added: "1.8"
|
||||
description:
|
||||
- Whether to use an SSL connection when connecting to the database
|
||||
default: False
|
||||
roles:
|
||||
version_added: "1.3"
|
||||
description:
|
||||
|
@@ -92,6 +97,9 @@ EXAMPLES = '''
|
|||
# Create 'burgers' database user with name 'bob' and password '12345'.
|
||||
- mongodb_user: database=burgers name=bob password=12345 state=present
|
||||
|
||||
# Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly)
|
||||
- mongodb_user: database=burgers name=bob password=12345 state=present ssl=True
|
||||
|
||||
# Delete 'burgers' database user with name 'bob'.
|
||||
- mongodb_user: database=burgers name=bob state=absent
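The new ssl flag in these examples is passed straight through to the underlying PyMongo client, as the connection hunk below shows. A minimal standalone sketch of the same connection, assuming the pymongo 2.x/3.x-era ssl keyword (newer releases spell it tls); host and credentials are just the values from the example above:

    from pymongo import MongoClient

    # ssl=True turns on TLS for the connection to mongod
    client = MongoClient('localhost', 27017, ssl=True)
    db = client['burgers']
    db.authenticate('bob', '12345')  # authenticate as the example user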
|
||||
|
||||
|
@@ -172,6 +180,7 @@ def main():
|
|||
database=dict(required=True, aliases=['db']),
|
||||
user=dict(required=True, aliases=['name']),
|
||||
password=dict(aliases=['pass']),
|
||||
ssl=dict(default=False),
|
||||
roles=dict(default=None, type='list'),
|
||||
state=dict(default='present', choices=['absent', 'present']),
|
||||
)
|
||||
|
@@ -188,14 +197,15 @@ def main():
|
|||
db_name = module.params['database']
|
||||
user = module.params['user']
|
||||
password = module.params['password']
|
||||
ssl = module.params['ssl']
|
||||
roles = module.params['roles']
|
||||
state = module.params['state']
|
||||
|
||||
try:
|
||||
if replica_set:
|
||||
client = MongoClient(login_host, int(login_port), replicaset=replica_set)
|
||||
client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
|
||||
else:
|
||||
client = MongoClient(login_host, int(login_port))
|
||||
client = MongoClient(login_host, int(login_port), ssl=ssl)
|
||||
|
||||
# try to authenticate as a target user to check if it already exists
|
||||
try:
|
database/mysql/__init__.py | 0 (new file)
|
@@ -159,9 +159,10 @@ def start_slave(cursor):
|
|||
return started
|
||||
|
||||
|
||||
def changemaster(cursor, chm):
|
||||
SQLPARAM = ",".join(chm)
|
||||
cursor.execute("CHANGE MASTER TO " + SQLPARAM)
|
||||
def changemaster(cursor, chm, chm_params):
|
||||
sql_param = ",".join(chm)
|
||||
query = 'CHANGE MASTER TO %s' % sql_param
|
||||
cursor.execute(query, chm_params)
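The point of this rewrite is to stop splicing user-supplied values into the CHANGE MASTER TO statement and to let the driver bind them instead. A minimal sketch of the same pattern in isolation, assuming a MySQLdb connection (host and credentials below are placeholders) and its support for pyformat-style %(name)s placeholders with a dict of parameters:

    import MySQLdb

    conn = MySQLdb.connect(host='localhost', user='ansible', passwd='secret', db='mysql')  # placeholder credentials
    cursor = conn.cursor()

    chm = []
    chm_params = {}
    # each option contributes a placeholder plus a matching entry in the dict
    chm.append("MASTER_HOST=%(master_host)s")
    chm_params['master_host'] = 'primary.example.com'  # placeholder value
    chm.append("MASTER_PORT=%(master_port)s")
    chm_params['master_port'] = 3306

    query = 'CHANGE MASTER TO ' + ",".join(chm)
    # the driver escapes and quotes the bound values, so no quoting is done by hand
    cursor.execute(query, chm_params)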
|
||||
|
||||
|
||||
def strip_quotes(s):
|
||||
|
@@ -318,37 +319,52 @@ def main():
|
|||
elif mode in "changemaster":
|
||||
print "Change master"
|
||||
chm=[]
|
||||
chm_params = {}
|
||||
if master_host:
|
||||
chm.append("MASTER_HOST='" + master_host + "'")
|
||||
chm.append("MASTER_HOST=%(master_host)s")
|
||||
chm_params['master_host'] = master_host
|
||||
if master_user:
|
||||
chm.append("MASTER_USER='" + master_user + "'")
|
||||
chm.append("MASTER_USER=%(master_user)s")
|
||||
chm_params['master_user'] = master_user
|
||||
if master_password:
|
||||
chm.append("MASTER_PASSWORD='" + master_password + "'")
|
||||
chm.append("MASTER_PASSWORD=%(master_password)s")
|
||||
chm_params['master_password'] = master_password
|
||||
if master_port:
|
||||
chm.append("MASTER_PORT=" + master_port)
|
||||
chm.append("MASTER_PORT=%(master_port)s")
|
||||
chm_params['master_port'] = master_port
|
||||
if master_connect_retry:
|
||||
chm.append("MASTER_CONNECT_RETRY='" + master_connect_retry + "'")
|
||||
chm.append("MASTER_CONNECT_RETRY=%(master_connect_retry)s")
|
||||
chm_params['master_connect_retry'] = master_connect_retry
|
||||
if master_log_file:
|
||||
chm.append("MASTER_LOG_FILE='" + master_log_file + "'")
|
||||
chm.append("MASTER_LOG_FILE=%(master_log_file)s")
|
||||
chm_params['master_log_file'] = master_log_file
|
||||
if master_log_pos:
|
||||
chm.append("MASTER_LOG_POS=" + master_log_pos)
|
||||
chm.append("MASTER_LOG_POS=%(master_log_pos)s")
|
||||
chm_params['master_log_pos'] = master_log_pos
|
||||
if relay_log_file:
|
||||
chm.append("RELAY_LOG_FILE='" + relay_log_file + "'")
|
||||
chm.append("RELAY_LOG_FILE=%(relay_log_file)s")
|
||||
chm_params['relay_log_file'] = relay_log_file
|
||||
if relay_log_pos:
|
||||
chm.append("RELAY_LOG_POS=" + relay_log_pos)
|
||||
chm.append("RELAY_LOG_POS=%(relay_log_pos)s")
|
||||
chm_params['relay_log_pos'] = relay_log_pos
|
||||
if master_ssl:
|
||||
chm.append("MASTER_SSL=1")
|
||||
if master_ssl_ca:
|
||||
chm.append("MASTER_SSL_CA='" + master_ssl_ca + "'")
|
||||
chm.append("MASTER_SSL_CA=%(master_ssl_ca)s")
|
||||
chm_params['master_ssl_ca'] = master_ssl_ca
|
||||
if master_ssl_capath:
|
||||
chm.append("MASTER_SSL_CAPATH='" + master_ssl_capath + "'")
|
||||
chm.append("MASTER_SSL_CAPATH=%(master_ssl_capath)s")
|
||||
chm_params['master_ssl_capath'] = master_ssl_capath
|
||||
if master_ssl_cert:
|
||||
chm.append("MASTER_SSL_CERT='" + master_ssl_cert + "'")
|
||||
chm.append("MASTER_SSL_CERT=%(master_ssl_cert)s")
|
||||
chm_params['master_ssl_cert'] = master_ssl_cert
|
||||
if master_ssl_key:
|
||||
chm.append("MASTER_SSL_KEY='" + master_ssl_key + "'")
|
||||
chm.append("MASTER_SSL_KEY=%(master_ssl_key)s")
|
||||
chm_params['master_ssl_key'] = master_ssl_key
|
||||
if master_ssl_cipher:
|
||||
chm.append("MASTER_SSL_CIPHER='" + master_ssl_cipher + "'")
|
||||
changemaster(cursor,chm)
|
||||
chm.append("MASTER_SSL_CIPHER=%(master_ssl_cipher)s")
|
||||
chm_params['master_ssl_cipher'] = master_ssl_cipher
|
||||
changemaster(cursor, chm, chm_params)
|
||||
module.exit_json(changed=True)
|
||||
elif mode in "startslave":
|
||||
started = start_slave(cursor)
|
||||
|
@@ -366,4 +382,4 @@ def main():
|
|||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
||||
warnings.simplefilter("ignore")
|
||||
warnings.simplefilter("ignore")
|
monitoring/uptimerobot.py | 167 (new file)
|
@@ -0,0 +1,167 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
DOCUMENTATION = '''
|
||||
|
||||
module: uptimerobot
|
||||
short_description: Pause and start Uptime Robot monitoring
|
||||
description:
|
||||
- This module will let you start and pause Uptime Robot Monitoring
|
||||
author: Nate Kingsley
|
||||
requirements:
|
||||
- Valid Uptime Robot API Key
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Define whether the monitor should be running or paused.
|
||||
required: true
|
||||
default: null
|
||||
choices: [ "started", "paused" ]
|
||||
aliases: []
|
||||
monitorid:
|
||||
description:
|
||||
- ID of the monitor to check.
|
||||
required: true
|
||||
default: null
|
||||
choices: []
|
||||
aliases: []
|
||||
apikey:
|
||||
description:
|
||||
- Uptime Robot API key.
|
||||
required: true
|
||||
default: null
|
||||
choices: []
|
||||
aliases: []
|
||||
notes:
|
||||
- Support for adding and removing monitors and alert contacts has not yet been implemented.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Pause the monitor with an ID of 12345.
|
||||
- uptimerobot: monitorid=12345
|
||||
apikey=12345-1234512345
|
||||
state=paused
|
||||
|
||||
# Start the monitor with an ID of 12345.
|
||||
- uptimerobot: monitorid=12345
|
||||
apikey=12345-1234512345
|
||||
state=started
|
||||
|
||||
'''
|
||||
|
||||
import json
|
||||
import urllib
|
||||
import urllib2
|
||||
import time
|
||||
|
||||
API_BASE = "http://api.uptimerobot.com/"
|
||||
|
||||
API_ACTIONS = dict(
|
||||
status='getMonitors?',
|
||||
editMonitor='editMonitor?'
|
||||
)
|
||||
|
||||
API_FORMAT = 'json'
|
||||
|
||||
API_NOJSONCALLBACK = 1
|
||||
|
||||
CHANGED_STATE = False
|
||||
|
||||
SUPPORTS_CHECK_MODE = False
|
||||
|
||||
def checkID(params):
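# Queries the getMonitors endpoint first so main() can verify that the API key
# and monitor ID are valid before any state change is attempted.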
|
||||
|
||||
data = urllib.urlencode(params)
|
||||
|
||||
full_uri = API_BASE + API_ACTIONS['status'] + data
|
||||
|
||||
req = urllib2.urlopen(full_uri)
|
||||
|
||||
result = req.read()
|
||||
|
||||
jsonresult = json.loads(result)
|
||||
|
||||
req.close()
|
||||
|
||||
return jsonresult
|
||||
|
||||
|
||||
def startMonitor(params):
|
||||
|
||||
params['monitorStatus'] = 1
|
||||
|
||||
data = urllib.urlencode(params)
|
||||
|
||||
full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
|
||||
|
||||
req = urllib2.urlopen(full_uri)
|
||||
|
||||
result = req.read()
|
||||
|
||||
jsonresult = json.loads(result)
|
||||
|
||||
req.close()
|
||||
|
||||
return jsonresult['stat']
|
||||
|
||||
|
||||
def pauseMonitor(params):
|
||||
|
||||
params['monitorStatus'] = 0
|
||||
|
||||
data = urllib.urlencode(params)
|
||||
|
||||
full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
|
||||
|
||||
req = urllib2.urlopen(full_uri)
|
||||
|
||||
result = req.read()
|
||||
|
||||
jsonresult = json.loads(result)
|
||||
|
||||
req.close()
|
||||
|
||||
return jsonresult['stat']
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
state = dict(required=True, choices=['started', 'paused']),
|
||||
apikey = dict(required=True),
|
||||
monitorid = dict(required=True)
|
||||
),
|
||||
supports_check_mode=SUPPORTS_CHECK_MODE
|
||||
)
|
||||
|
||||
params = dict(
|
||||
apiKey=module.params['apikey'],
|
||||
monitors=module.params['monitorid'],
|
||||
monitorID=module.params['monitorid'],
|
||||
format=API_FORMAT,
|
||||
noJsonCallback=API_NOJSONCALLBACK
|
||||
)
|
||||
|
||||
check_result = checkID(params)
|
||||
|
||||
if check_result['stat'] != "ok":
|
||||
module.fail_json(
|
||||
msg="failed",
|
||||
result=check_result['message']
|
||||
)
|
||||
|
||||
if module.params['state'] == 'started':
|
||||
monitor_result = startMonitor(params)
|
||||
else:
|
||||
monitor_result = pauseMonitor(params)
|
||||
|
||||
|
||||
|
||||
module.exit_json(
|
||||
msg="success",
|
||||
result=monitor_result
|
||||
)
|
||||
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
monitoring/zabbix_group.py | 212 (new file)
|
@@ -0,0 +1,212 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2014, René Moser <mail@renemoser.net>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: zabbix_group
|
||||
short_description: Add or remove a host group in Zabbix.
|
||||
description:
|
||||
- This module uses the Zabbix API to add and remove host groups.
|
||||
version_added: '1.8'
|
||||
requirements: [ 'zabbix-api' ]
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Whether the host group should be added or removed.
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
host_group:
|
||||
description:
|
||||
- Name of the host group to be added or removed.
|
||||
required: true
|
||||
default: null
|
||||
aliases: [ ]
|
||||
server_url:
|
||||
description:
|
||||
- URL of the Zabbix server, including protocol (http or https), e.g.
|
||||
https://monitoring.example.com/zabbix. C(url) is an alias
|
||||
for C(server_url). If not set, the environment variable
|
||||
C(ZABBIX_SERVER_URL) is used.
|
||||
required: true
|
||||
default: null
|
||||
aliases: [ 'url' ]
|
||||
login_user:
|
||||
description:
|
||||
- Zabbix user name. If not set, the environment variable
|
||||
C(ZABBIX_LOGIN_USER) is used.
|
||||
required: true
|
||||
default: null
|
||||
login_password:
|
||||
description:
|
||||
- Zabbix user password. If not set, the environment variable
|
||||
C(ZABBIX_LOGIN_PASSWORD) is used.
|
||||
required: true
|
||||
notes:
|
||||
- The module has been tested with Zabbix Server 2.2.
|
||||
author: René Moser
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
# Add a new host group to Zabbix
|
||||
- zabbix_group: host_group='Linux servers'
|
||||
server_url=https://monitoring.example.com/zabbix
|
||||
login_user=ansible
|
||||
login_password=secure
|
||||
|
||||
# Add a new host group, login data is provided by environment variables:
|
||||
# ZABBIX_LOGIN_USER, ZABBIX_LOGIN_PASSWORD, ZABBIX_SERVER_URL:
|
||||
- zabbix_group: host_group=Webservers
|
||||
|
||||
# Remove a host group from Zabbix
|
||||
- zabbix_group: host_group='Linux servers'
|
||||
state=absent
|
||||
server_url=https://monitoring.example.com/zabbix
|
||||
login_user=ansible
|
||||
login_password=secure
|
||||
'''
|
||||
|
||||
try:
|
||||
from zabbix_api import ZabbixAPI
|
||||
HAS_ZABBIX_API = True
|
||||
except ImportError:
|
||||
HAS_ZABBIX_API = False
|
||||
|
||||
|
||||
def create_group(zbx, host_group):
|
||||
try:
|
||||
result = zbx.hostgroup.create(
|
||||
{
|
||||
'name': host_group
|
||||
}
|
||||
)
|
||||
except BaseException as e:
|
||||
return 1, None, str(e)
|
||||
return 0, result['groupids'], None
|
||||
|
||||
|
||||
def get_group(zbx, host_group):
|
||||
try:
|
||||
result = zbx.hostgroup.get(
|
||||
{
|
||||
'filter':
|
||||
{
|
||||
'name': host_group,
|
||||
}
|
||||
}
|
||||
)
|
||||
except BaseException as e:
|
||||
return 1, None, str(e)
|
||||
|
||||
return 0, result[0]['groupid'], None
|
||||
|
||||
|
||||
def delete_group(zbx, group_id):
|
||||
try:
|
||||
zbx.hostgroup.delete([ group_id ])
|
||||
except BaseException as e:
|
||||
return 1, None, str(e)
|
||||
return 0, None, None
|
||||
|
||||
|
||||
def check_group(zbx, host_group):
|
||||
try:
|
||||
result = zbx.hostgroup.exists(
|
||||
{
|
||||
'name': host_group
|
||||
}
|
||||
)
|
||||
except BaseException as e:
|
||||
return 1, None, str(e)
|
||||
return 0, result, None
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
host_group=dict(required=True, default=None),
|
||||
server_url=dict(default=None, aliases=['url']),
|
||||
login_user=dict(default=None),
|
||||
login_password=dict(default=None),
|
||||
),
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
if not HAS_ZABBIX_API:
|
||||
module.fail_json(msg='Missing required zabbix-api module (check docs or install with: pip install zabbix-api)')
|
||||
|
||||
try:
|
||||
login_user = module.params['login_user'] or os.environ['ZABBIX_LOGIN_USER']
|
||||
login_password = module.params['login_password'] or os.environ['ZABBIX_LOGIN_PASSWORD']
|
||||
server_url = module.params['server_url'] or os.environ['ZABBIX_SERVER_URL']
|
||||
except KeyError, e:
|
||||
module.fail_json(msg='Missing login data: %s is not set.' % e.message)
|
||||
|
||||
host_group = module.params['host_group']
|
||||
state = module.params['state']
|
||||
|
||||
try:
|
||||
zbx = ZabbixAPI(server_url)
|
||||
zbx.login(login_user, login_password)
|
||||
except BaseException as e:
|
||||
module.fail_json(msg='Failed to connect to Zabbix server: %s' % e)
|
||||
|
||||
changed = False
|
||||
msg = ''
|
||||
|
||||
if state == 'present':
|
||||
(rc, exists, error) = check_group(zbx, host_group)
|
||||
if rc != 0:
|
||||
module.fail_json(msg='Failed to check host group %s existence: %s' % (host_group, error))
|
||||
if not exists:
|
||||
if module.check_mode:
|
||||
changed = True
|
||||
else:
|
||||
(rc, group, error) = create_group(zbx, host_group)
|
||||
if rc == 0:
|
||||
changed = True
|
||||
else:
|
||||
module.fail_json(msg='Failed to get host group: %s' % error)
|
||||
|
||||
if state == 'absent':
|
||||
(rc, exists, error) = check_group(zbx, host_group)
|
||||
if rc != 0:
|
||||
module.fail_json(msg='Failed to check host group %s existence: %s' % (host_group, error))
|
||||
if exists:
|
||||
if module.check_mode:
|
||||
changed = True
|
||||
else:
|
||||
(rc, group_id, error) = get_group(zbx, host_group)
|
||||
if rc != 0:
|
||||
module.fail_json(msg='Failed to get host group: %s' % error)
|
||||
|
||||
(rc, _, error) = delete_group(zbx, group_id)
|
||||
if rc == 0:
|
||||
changed = True
|
||||
else:
|
||||
module.fail_json(msg='Failed to remove host group: %s' % error)
|
||||
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
network/__init__.py | 0 (new file)
network/a10/__init__.py | 0 (new file)
network/citrix/__init__.py | 0 (new file)
|
@@ -97,7 +97,7 @@ author: Alex Coomans
|
|||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# authenicate using email and API token
|
||||
# authenticate using email and API token
|
||||
- local_action: dnsimple account_email=test@example.com account_api_token=dummyapitoken
|
||||
|
||||
# fetch all domains
|
network/f5/__init__.py | 0 (new file)
network/snmp_facts.py | 364 (new executable file)
|
@@ -0,0 +1,364 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# This file is part of Networklore's snmp library for Ansible
|
||||
#
|
||||
# The module is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# The module is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: snmp_facts
|
||||
version_added: "1.9"
|
||||
author: Patrick Ogenstad (@networklore)
|
||||
short_description: Retrieve facts for a device using SNMP.
|
||||
description:
|
||||
- Retrieve facts for a device using SNMP; the facts will be
inserted into the ansible_facts key.
|
||||
requirements:
|
||||
- pysnmp
|
||||
options:
|
||||
host:
|
||||
description:
|
||||
- Set to {{ inventory_hostname }}
|
||||
required: true
|
||||
version:
|
||||
description:
|
||||
- SNMP Version to use, v2/v2c or v3
|
||||
choices: [ 'v2', 'v2c', 'v3' ]
|
||||
required: true
|
||||
community:
|
||||
description:
|
||||
- The SNMP community string, required if version is v2/v2c
|
||||
required: false
|
||||
level:
|
||||
description:
|
||||
- Authentication level, required if version is v3
|
||||
choices: [ 'authPriv', 'authNoPriv' ]
|
||||
required: false
|
||||
username:
|
||||
description:
|
||||
- Username for SNMPv3, required if version is v3
|
||||
required: false
|
||||
integrity:
|
||||
description:
|
||||
- Hashing algorithm, required if version is v3
|
||||
choices: [ 'md5', 'sha' ]
|
||||
required: false
|
||||
authkey:
|
||||
description:
|
||||
- Authentication key, required if version is v3
|
||||
required: false
|
||||
privacy:
|
||||
description:
|
||||
- Encryption algorithm, required if level is authPriv
|
||||
choices: [ 'des', 'aes' ]
|
||||
required: false
|
||||
privkey:
|
||||
description:
|
||||
- Encryption key, required if level is authPriv
|
||||
required: false
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Gather facts with SNMP version 2
|
||||
- snmp_facts: host={{ inventory_hostname }} version=v2c community=public
|
||||
|
||||
# Gather facts using SNMP version 3
|
||||
- snmp_facts:
|
||||
host={{ inventory_hostname }}
|
||||
version=v3
|
||||
level=authPriv
|
||||
integrity=sha
|
||||
privacy=aes
|
||||
username=snmp-user
|
||||
authkey=abc12345
|
||||
privkey=def6789
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from collections import defaultdict
|
||||
|
||||
try:
|
||||
from pysnmp.entity.rfc3413.oneliner import cmdgen
|
||||
has_pysnmp = True
|
||||
except:
|
||||
has_pysnmp = False
|
||||
|
||||
class DefineOid(object):
|
||||
|
||||
def __init__(self,dotprefix=False):
|
||||
if dotprefix:
|
||||
dp = "."
|
||||
else:
|
||||
dp = ""
|
||||
|
||||
# From SNMPv2-MIB
|
||||
self.sysDescr = dp + "1.3.6.1.2.1.1.1.0"
|
||||
self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0"
|
||||
self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0"
|
||||
self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
|
||||
self.sysName = dp + "1.3.6.1.2.1.1.5.0"
|
||||
self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"
|
||||
|
||||
# From IF-MIB
|
||||
self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
|
||||
self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
|
||||
self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4"
|
||||
self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5"
|
||||
self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6"
|
||||
self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7"
|
||||
self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8"
|
||||
self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18"
|
||||
|
||||
# From IP-MIB
|
||||
self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
|
||||
self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
|
||||
self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"
|
||||
|
||||
|
||||
def decode_hex(hexstring):
|
||||
|
||||
if len(hexstring) < 3:
|
||||
return hexstring
|
||||
if hexstring[:2] == "0x":
|
||||
return hexstring[2:].decode("hex")
|
||||
else:
|
||||
return hexstring
|
||||
|
||||
def decode_mac(hexstring):
|
||||
|
||||
if len(hexstring) != 14:
|
||||
return hexstring
|
||||
if hexstring[:2] == "0x":
|
||||
return hexstring[2:]
|
||||
else:
|
||||
return hexstring
|
||||
|
||||
def lookup_adminstatus(int_adminstatus):
|
||||
adminstatus_options = {
|
||||
1: 'up',
|
||||
2: 'down',
|
||||
3: 'testing'
|
||||
}
|
||||
if int_adminstatus in adminstatus_options.keys():
|
||||
return adminstatus_options[int_adminstatus]
|
||||
else:
|
||||
return ""
|
||||
|
||||
def lookup_operstatus(int_operstatus):
|
||||
operstatus_options = {
|
||||
1: 'up',
|
||||
2: 'down',
|
||||
3: 'testing',
|
||||
4: 'unknown',
|
||||
5: 'dormant',
|
||||
6: 'notPresent',
|
||||
7: 'lowerLayerDown'
|
||||
}
|
||||
if int_operstatus in operstatus_options.keys():
|
||||
return operstatus_options[int_operstatus]
|
||||
else:
|
||||
return ""
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
host=dict(required=True),
|
||||
version=dict(required=True, choices=['v2', 'v2c', 'v3']),
|
||||
community=dict(required=False, default=False),
|
||||
username=dict(required=False),
|
||||
level=dict(required=False, choices=['authNoPriv', 'authPriv']),
|
||||
integrity=dict(required=False, choices=['md5', 'sha']),
|
||||
privacy=dict(required=False, choices=['des', 'aes']),
|
||||
authkey=dict(required=False),
|
||||
privkey=dict(required=False),
|
||||
removeplaceholder=dict(required=False)),
|
||||
required_together = ( ['username','level','integrity','authkey'],['privacy','privkey'],),
|
||||
supports_check_mode=False)
|
||||
|
||||
m_args = module.params
|
||||
|
||||
if not has_pysnmp:
|
||||
module.fail_json(msg='Missing required pysnmp module (check docs)')
|
||||
|
||||
cmdGen = cmdgen.CommandGenerator()
|
||||
|
||||
# Verify that we receive a community when using snmp v2
|
||||
if m_args['version'] == "v2" or m_args['version'] == "v2c":
|
||||
if m_args['community'] == False:
|
||||
module.fail_json(msg='Community not set when using snmp version 2')
|
||||
|
||||
if m_args['version'] == "v3":
|
||||
if m_args['username'] == None:
|
||||
module.fail_json(msg='Username not set when using snmp version 3')
|
||||
|
||||
if m_args['level'] == "authPriv" and m_args['privacy'] == None:
|
||||
module.fail_json(msg='Privacy algorithm not set when using authPriv')
|
||||
|
||||
|
||||
if m_args['integrity'] == "sha":
|
||||
integrity_proto = cmdgen.usmHMACSHAAuthProtocol
|
||||
elif m_args['integrity'] == "md5":
|
||||
integrity_proto = cmdgen.usmHMACMD5AuthProtocol
|
||||
|
||||
if m_args['privacy'] == "aes":
|
||||
privacy_proto = cmdgen.usmAesCfb128Protocol
|
||||
elif m_args['privacy'] == "des":
|
||||
privacy_proto = cmdgen.usmDESPrivProtocol
|
||||
|
||||
# Use SNMP Version 2
|
||||
if m_args['version'] == "v2" or m_args['version'] == "v2c":
|
||||
snmp_auth = cmdgen.CommunityData(m_args['community'])
|
||||
|
||||
# Use SNMP Version 3 with authNoPriv
|
||||
elif m_args['level'] == "authNoPriv":
|
||||
snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto)
|
||||
|
||||
# Use SNMP Version 3 with authPriv
|
||||
else:
|
||||
snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto, privProtocol=privacy_proto)
|
||||
|
||||
# Use p to prefix OIDs with a dot for polling
|
||||
p = DefineOid(dotprefix=True)
|
||||
# Use v without a prefix to use with return values
|
||||
v = DefineOid(dotprefix=False)
|
||||
|
||||
Tree = lambda: defaultdict(Tree)
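# Tree is a recursive defaultdict: nested keys can be assigned without creating the
# intermediate dicts first, e.g. results['ansible_interfaces'][3]['mtu'] = '1500'
# works on an otherwise empty Tree.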
|
||||
|
||||
results = Tree()
|
||||
|
||||
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
|
||||
snmp_auth,
|
||||
cmdgen.UdpTransportTarget((m_args['host'], 161)),
|
||||
cmdgen.MibVariable(p.sysDescr,),
|
||||
cmdgen.MibVariable(p.sysObjectId,),
|
||||
cmdgen.MibVariable(p.sysUpTime,),
|
||||
cmdgen.MibVariable(p.sysContact,),
|
||||
cmdgen.MibVariable(p.sysName,),
|
||||
cmdgen.MibVariable(p.sysLocation,),
|
||||
)
|
||||
|
||||
|
||||
if errorIndication:
|
||||
module.fail_json(msg=str(errorIndication))
|
||||
|
||||
for oid, val in varBinds:
|
||||
current_oid = oid.prettyPrint()
|
||||
current_val = val.prettyPrint()
|
||||
if current_oid == v.sysDescr:
|
||||
results['ansible_sysdescr'] = decode_hex(current_val)
|
||||
elif current_oid == v.sysObjectId:
|
||||
results['ansible_sysobjectid'] = current_val
|
||||
elif current_oid == v.sysUpTime:
|
||||
results['ansible_sysuptime'] = current_val
|
||||
elif current_oid == v.sysContact:
|
||||
results['ansible_syscontact'] = current_val
|
||||
elif current_oid == v.sysName:
|
||||
results['ansible_sysname'] = current_val
|
||||
elif current_oid == v.sysLocation:
|
||||
results['ansible_syslocation'] = current_val
|
||||
|
||||
errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
|
||||
snmp_auth,
|
||||
cmdgen.UdpTransportTarget((m_args['host'], 161)),
|
||||
cmdgen.MibVariable(p.ifIndex,),
|
||||
cmdgen.MibVariable(p.ifDescr,),
|
||||
cmdgen.MibVariable(p.ifMtu,),
|
||||
cmdgen.MibVariable(p.ifSpeed,),
|
||||
cmdgen.MibVariable(p.ifPhysAddress,),
|
||||
cmdgen.MibVariable(p.ifAdminStatus,),
|
||||
cmdgen.MibVariable(p.ifOperStatus,),
|
||||
cmdgen.MibVariable(p.ipAdEntAddr,),
|
||||
cmdgen.MibVariable(p.ipAdEntIfIndex,),
|
||||
cmdgen.MibVariable(p.ipAdEntNetMask,),
|
||||
|
||||
cmdgen.MibVariable(p.ifAlias,),
|
||||
)
|
||||
|
||||
|
||||
if errorIndication:
|
||||
module.fail_json(msg=str(errorIndication))
|
||||
|
||||
interface_indexes = []
|
||||
|
||||
all_ipv4_addresses = []
|
||||
ipv4_networks = Tree()
|
||||
|
||||
for varBinds in varTable:
|
||||
for oid, val in varBinds:
|
||||
current_oid = oid.prettyPrint()
|
||||
current_val = val.prettyPrint()
|
||||
if v.ifIndex in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['ifindex'] = current_val
|
||||
interface_indexes.append(ifIndex)
|
||||
if v.ifDescr in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['name'] = current_val
|
||||
if v.ifMtu in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['mtu'] = current_val
|
||||
if v.ifMtu in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['speed'] = current_val
|
||||
if v.ifPhysAddress in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val)
|
||||
if v.ifAdminStatus in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val))
|
||||
if v.ifOperStatus in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val))
|
||||
if v.ipAdEntAddr in current_oid:
|
||||
curIPList = current_oid.rsplit('.', 4)[-4:]
|
||||
curIP = ".".join(curIPList)
|
||||
ipv4_networks[curIP]['address'] = current_val
|
||||
all_ipv4_addresses.append(current_val)
|
||||
if v.ipAdEntIfIndex in current_oid:
|
||||
curIPList = current_oid.rsplit('.', 4)[-4:]
|
||||
curIP = ".".join(curIPList)
|
||||
ipv4_networks[curIP]['interface'] = current_val
|
||||
if v.ipAdEntNetMask in current_oid:
|
||||
curIPList = current_oid.rsplit('.', 4)[-4:]
|
||||
curIP = ".".join(curIPList)
|
||||
ipv4_networks[curIP]['netmask'] = current_val
|
||||
|
||||
if v.ifAlias in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['description'] = current_val
|
||||
|
||||
interface_to_ipv4 = {}
|
||||
for ipv4_network in ipv4_networks:
|
||||
current_interface = ipv4_networks[ipv4_network]['interface']
|
||||
current_network = {
|
||||
'address': ipv4_networks[ipv4_network]['address'],
|
||||
'netmask': ipv4_networks[ipv4_network]['netmask']
|
||||
}
|
||||
if not current_interface in interface_to_ipv4:
|
||||
interface_to_ipv4[current_interface] = []
|
||||
interface_to_ipv4[current_interface].append(current_network)
|
||||
else:
|
||||
interface_to_ipv4[current_interface].append(current_network)
|
||||
|
||||
for interface in interface_to_ipv4:
|
||||
results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]
|
||||
|
||||
results['ansible_all_ipv4_addresses'] = all_ipv4_addresses
|
||||
|
||||
module.exit_json(ansible_facts=results)
|
||||
|
||||
|
||||
main()
|
||||
|
|
@@ -96,4 +96,5 @@ def main():
|
|||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
main()
|
||||
|
|
|
@@ -105,8 +105,7 @@ EXAMPLES = """
|
|||
|
||||
"""
|
||||
|
||||
|
||||
SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
|
||||
SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
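With the move to the hooks.slack.com endpoint, the token argument is now expected to carry the full three-part path that Slack issues for an incoming webhook rather than just the trailing secret. An illustrative (entirely made-up) value:

    # hypothetical token of the new form: <team id>/<webhook id>/<secret>
    token = 'T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX'
    url = SLACK_INCOMING_WEBHOOK % (token)
    # -> https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX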
|
||||
|
||||
def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse):
|
||||
payload = dict(text=text)
|
||||
|
@@ -128,11 +127,11 @@ def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoj
|
|||
return payload
|
||||
|
||||
def do_notify_slack(module, domain, token, payload):
|
||||
slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, token)
|
||||
slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (token)
|
||||
|
||||
response, info = fetch_url(module, slack_incoming_webhook, data=payload)
|
||||
if info['status'] != 200:
|
||||
obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, '[obscured]')
|
||||
obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]')
|
||||
module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg']))
|
||||
|
||||
def main():
|
||||
|
@@ -170,4 +169,4 @@ def main():
|
|||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
main()
|
||||
main()
|
||||
|
|
packaging/__init__.py | 0 (new file)
packaging/bower.py | 187 (new file)
|
@@ -0,0 +1,187 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2014, Michael Warkentin <mwarkentin@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: bower
|
||||
short_description: Manage bower packages with bower
|
||||
description:
|
||||
- Manage bower packages with bower
|
||||
version_added: 1.7
|
||||
author: Michael Warkentin
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of a bower package to install
|
||||
required: false
|
||||
offline:
|
||||
description:
|
||||
- Install packages from local cache, if the packages were installed before
|
||||
required: false
|
||||
default: no
|
||||
choices: [ "yes", "no" ]
|
||||
path:
|
||||
description:
|
||||
- The base path where to install the bower packages
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- The state of the bower package
|
||||
required: false
|
||||
default: present
|
||||
choices: [ "present", "absent", "latest" ]
|
||||
version:
|
||||
description:
|
||||
- The version to be installed
|
||||
required: false
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
description: Install "bootstrap" bower package.
|
||||
- bower: name=bootstrap
|
||||
|
||||
description: Install "bootstrap" bower package on version 3.1.1.
|
||||
- bower: name=bootstrap version=3.1.1
|
||||
|
||||
description: Remove the "bootstrap" bower package.
|
||||
- bower: name=bootstrap state=absent
|
||||
|
||||
description: Install packages based on bower.json.
|
||||
- bower: path=/app/location
|
||||
|
||||
description: Update packages based on bower.json to their latest version.
|
||||
- bower: path=/app/location state=latest
|
||||
'''
|
||||
|
||||
|
||||
class Bower(object):
|
||||
def __init__(self, module, **kwargs):
|
||||
self.module = module
|
||||
self.name = kwargs['name']
|
||||
self.offline = kwargs['offline']
|
||||
self.path = kwargs['path']
|
||||
self.version = kwargs['version']
|
||||
|
||||
if kwargs['version']:
|
||||
self.name_version = self.name + '#' + self.version
|
||||
else:
|
||||
self.name_version = self.name
|
||||
|
||||
def _exec(self, args, run_in_check_mode=False, check_rc=True):
|
||||
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
|
||||
cmd = ["bower"] + args
|
||||
|
||||
if self.name:
|
||||
cmd.append(self.name_version)
|
||||
|
||||
if self.offline:
|
||||
cmd.append('--offline')
|
||||
|
||||
# If path is specified, cd into that path and run the command.
|
||||
cwd = None
|
||||
if self.path:
|
||||
if not os.path.exists(self.path):
|
||||
os.makedirs(self.path)
|
||||
if not os.path.isdir(self.path):
|
||||
self.module.fail_json(msg="path %s is not a directory" % self.path)
|
||||
cwd = self.path
|
||||
|
||||
rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
|
||||
return out
|
||||
return ''
|
||||
|
||||
def list(self):
|
||||
cmd = ['list', '--json']
|
||||
|
||||
installed = list()
|
||||
missing = list()
|
||||
outdated = list()
|
||||
data = json.loads(self._exec(cmd, True, False))
|
||||
if 'dependencies' in data:
|
||||
for dep in data['dependencies']:
|
||||
if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
|
||||
missing.append(dep)
|
||||
elif data['dependencies'][dep]['pkgMeta']['version'] != data['dependencies'][dep]['update']['latest']:
|
||||
outdated.append(dep)
|
||||
elif 'incompatible' in data['dependencies'][dep] and data['dependencies'][dep]['incompatible']:
|
||||
outdated.append(dep)
|
||||
else:
|
||||
installed.append(dep)
|
||||
# Named dependency not installed
|
||||
else:
|
||||
missing.append(self.name)
|
||||
|
||||
return installed, missing, outdated
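For context, list() classifies each dependency by inspecting a few keys of the JSON that `bower list --json` prints. An abridged, hypothetical example of that structure showing only the keys the parser above relies on:

    # trimmed, illustrative shape of the parsed `bower list --json` output
    data = {
        'dependencies': {
            'bootstrap': {
                'pkgMeta': {'version': '3.1.0'},
                'update': {'latest': '3.1.1'},  # version != latest, so 'bootstrap' is outdated
            },
            'jquery': {
                'missing': True,  # listed in bower.json but not installed, so 'jquery' is missing
            },
        }
    }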
|
||||
|
||||
def install(self):
|
||||
return self._exec(['install'])
|
||||
|
||||
def update(self):
|
||||
return self._exec(['update'])
|
||||
|
||||
def uninstall(self):
|
||||
return self._exec(['uninstall'])
|
||||
|
||||
|
||||
def main():
|
||||
arg_spec = dict(
|
||||
name=dict(default=None),
|
||||
offline=dict(default='no', type='bool'),
|
||||
path=dict(required=True),
|
||||
state=dict(default='present', choices=['present', 'absent', 'latest', ]),
|
||||
version=dict(default=None),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=arg_spec
|
||||
)
|
||||
|
||||
name = module.params['name']
|
||||
offline = module.params['offline']
|
||||
path = module.params['path']
|
||||
state = module.params['state']
|
||||
version = module.params['version']
|
||||
|
||||
if state == 'absent' and not name:
|
||||
module.fail_json(msg='uninstalling a package is only available for named packages')
|
||||
|
||||
bower = Bower(module, name=name, offline=offline, path=path, version=version)
|
||||
|
||||
changed = False
|
||||
if state == 'present':
|
||||
installed, missing, outdated = bower.list()
|
||||
if len(missing):
|
||||
changed = True
|
||||
bower.install()
|
||||
elif state == 'latest':
|
||||
installed, missing, outdated = bower.list()
|
||||
if len(missing) or len(outdated):
|
||||
changed = True
|
||||
bower.update()
|
||||
else: # Absent
|
||||
installed, missing, outdated = bower.list()
|
||||
if name in installed:
|
||||
changed = True
|
||||
bower.uninstall()
|
||||
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
# Import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
packaging/dnf.py | 841 (new file)
|
@@ -0,0 +1,841 @@
|
|||
#!/usr/bin/python -tt
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Written by Cristian van Ee <cristian at cvee.org>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
|
||||
import traceback
|
||||
import os
|
||||
import dnf
|
||||
|
||||
try:
|
||||
from dnf import find_unfinished_transactions, find_ts_remaining
|
||||
from rpmUtils.miscutils import splitFilename
|
||||
transaction_helpers = True
|
||||
except:
|
||||
transaction_helpers = False
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: dnf
|
||||
version_added: historical
|
||||
short_description: Manages packages with the I(dnf) package manager
|
||||
description:
|
||||
- Installs, upgrade, removes, and lists packages and groups with the I(dnf) package manager.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: dnf -y update. You can also pass a url or a local path to a rpm file."
|
||||
required: true
|
||||
version_added: "1.8"
|
||||
default: null
|
||||
aliases: []
|
||||
list:
|
||||
description:
|
||||
- Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
|
||||
required: false
|
||||
version_added: "1.8"
|
||||
default: null
|
||||
state:
|
||||
description:
|
||||
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
|
||||
required: false
|
||||
choices: [ "present", "latest", "absent" ]
|
||||
version_added: "1.8"
|
||||
default: "present"
|
||||
enablerepo:
|
||||
description:
|
||||
- I(Repoid) of repositories to enable for the install/update operation.
|
||||
These repos will not persist beyond the transaction.
|
||||
When specifying multiple repos, separate them with a ",".
|
||||
required: false
|
||||
version_added: "1.8"
|
||||
default: null
|
||||
aliases: []
|
||||
|
||||
disablerepo:
|
||||
description:
|
||||
- I(Repoid) of repositories to disable for the install/update operation.
|
||||
These repos will not persist beyond the transaction.
|
||||
When specifying multiple repos, separate them with a ",".
|
||||
required: false
|
||||
version_added: "1.8"
|
||||
default: null
|
||||
aliases: []
|
||||
|
||||
conf_file:
|
||||
description:
|
||||
- The remote dnf configuration file to use for the transaction.
|
||||
required: false
|
||||
version_added: "1.8"
|
||||
default: null
|
||||
aliases: []
|
||||
|
||||
disable_gpg_check:
|
||||
description:
|
||||
- Whether to disable the GPG checking of signatures of packages being
|
||||
installed. Has an effect only if state is I(present) or I(latest).
|
||||
required: false
|
||||
version_added: "1.8"
|
||||
default: "no"
|
||||
choices: ["yes", "no"]
|
||||
aliases: []
|
||||
|
||||
notes: []
|
||||
# informational: requirements for nodes
|
||||
requirements: [ dnf ]
|
||||
author: Cristian van Ee
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: install the latest version of Apache
|
||||
dnf: name=httpd state=latest
|
||||
|
||||
- name: remove the Apache package
|
||||
dnf: name=httpd state=absent
|
||||
|
||||
- name: install the latest version of Apache from the testing repo
|
||||
dnf: name=httpd enablerepo=testing state=present
|
||||
|
||||
- name: upgrade all packages
|
||||
dnf: name=* state=latest
|
||||
|
||||
- name: install the nginx rpm from a remote repo
|
||||
dnf: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
|
||||
|
||||
- name: install nginx rpm from a local file
|
||||
dnf: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
|
||||
|
||||
- name: install the 'Development tools' package group
|
||||
dnf: name="@Development tools" state=present
|
||||
|
||||
'''
|
||||
|
||||
def_qf = "%{name}-%{version}-%{release}.%{arch}"
|
||||
|
||||
repoquery='/usr/bin/repoquery'
|
||||
if not os.path.exists(repoquery):
|
||||
repoquery = None
|
||||
|
||||
dnfbin='/usr/bin/dnf'
|
||||
|
||||
import syslog
|
||||
|
||||
def log(msg):
|
||||
syslog.openlog('ansible-dnf', 0, syslog.LOG_USER)
|
||||
syslog.syslog(syslog.LOG_NOTICE, msg)
|
||||
|
||||
def dnf_base(conf_file=None, cachedir=False):
|
||||
|
||||
my = dnf.Base()
|
||||
my.logging.verbose_level=0
|
||||
my.logging.verbose_level=0
|
||||
if conf_file and os.path.exists(conf_file):
|
||||
my.config = conf_file
|
||||
if cachedir or os.geteuid() != 0:
|
||||
if cachedir or os.geteuid() != 0:
|
||||
if hasattr(my, 'setCacheDir'):
|
||||
my.setCacheDir()
|
||||
else:
|
||||
cachedir = cachedir.dnf.Conf()
|
||||
my.repos.setCacheDir(cachedir)
|
||||
my.conf.cache = 0
|
||||
|
||||
return my
|
||||
|
||||
def install_dnf_utils(module):
|
||||
|
||||
if not module.check_mode:
|
||||
dnf_path = module.get_bin_path('dnf')
|
||||
if dnf_path:
|
||||
rc, so, se = module.run_command('%s -y install dnf-plugins-core' % dnf_path)
|
||||
if rc == 0:
|
||||
this_path = module.get_bin_path('repoquery')
|
||||
global repoquery
|
||||
repoquery = this_path
|
||||
|
||||
def po_to_nevra(po):
|
||||
|
||||
if hasattr(po, 'ui_nevra'):
|
||||
return po.ui_nevra
|
||||
else:
|
||||
return '%s-%s-%s.%s' % (po.name, po.version, po.release, po.arch)
|
||||
|
||||
def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[], is_pkg=False):
|
||||
|
||||
if not repoq:
|
||||
|
||||
pkgs = []
|
||||
try:
|
||||
my = dnf_base(conf_file)
|
||||
for rid in en_repos:
|
||||
my.repos.enableRepo(rid)
|
||||
for rid in dis_repos:
|
||||
my.repos.disableRepo(rid)
|
||||
|
||||
e,m,u = my.rpmdb.matchPackageNames([pkgspec])
|
||||
pkgs = e + m
|
||||
if not pkgs:
|
||||
pkgs.extend(my.returnInstalledPackagesByDep(pkgspec))
|
||||
except Exception, e:
|
||||
module.fail_json(msg="Failure talking to dnf: %s" % e)
|
||||
|
||||
return [ po_to_nevra(p) for p in pkgs ]
|
||||
|
||||
else:
|
||||
|
||||
cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, pkgspec]
|
||||
rc,out,err = module.run_command(cmd)
|
||||
if not is_pkg:
|
||||
cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, "--whatprovides", pkgspec]
|
||||
rc2,out2,err2 = module.run_command(cmd)
|
||||
else:
|
||||
rc2,out2,err2 = (0, '', '')
|
||||
|
||||
if rc == 0 and rc2 == 0:
|
||||
out += out2
|
||||
return [ p for p in out.split('\n') if p.strip() ]
|
||||
else:
|
||||
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
|
||||
|
||||
return []
|
||||
|
||||
def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
|
||||
|
||||
if not repoq:
|
||||
|
||||
pkgs = []
|
||||
try:
|
||||
my = dnf_base(conf_file)
|
||||
for rid in en_repos:
|
||||
my.repos.enableRepo(rid)
|
||||
for rid in dis_repos:
|
||||
my.repos.disableRepo(rid)
|
||||
|
||||
e,m,u = my.pkgSack.matchPackageNames([pkgspec])
|
||||
pkgs = e + m
|
||||
if not pkgs:
|
||||
pkgs.extend(my.returnPackagesByDep(pkgspec))
|
||||
except Exception, e:
|
||||
module.fail_json(msg="Failure talking to dnf: %s" % e)
|
||||
|
||||
return [ po_to_nevra(p) for p in pkgs ]
|
||||
|
||||
else:
|
||||
myrepoq = list(repoq)
|
||||
|
||||
for repoid in dis_repos:
|
||||
r_cmd = ['--disablerepo', repoid]
|
||||
myrepoq.extend(r_cmd)
|
||||
|
||||
for repoid in en_repos:
|
||||
r_cmd = ['--enablerepo', repoid]
|
||||
myrepoq.extend(r_cmd)
|
||||
|
||||
cmd = myrepoq + ["--qf", qf, pkgspec]
|
||||
rc,out,err = module.run_command(cmd)
|
||||
if rc == 0:
|
||||
return [ p for p in out.split('\n') if p.strip() ]
|
||||
else:
|
||||
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
|
||||
|
||||
|
||||
return []
|
||||
|
||||
def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
|
||||
|
||||
if not repoq:
|
||||
|
||||
retpkgs = []
|
||||
pkgs = []
|
||||
updates = []
|
||||
|
||||
try:
|
||||
my = dnf_base(conf_file)
|
||||
for rid in en_repos:
|
||||
my.repos.enableRepo(rid)
|
||||
for rid in dis_repos:
|
||||
my.repos.disableRepo(rid)
|
||||
|
||||
pkgs = my.returnPackagesByDep(pkgspec) + my.returnInstalledPackagesByDep(pkgspec)
|
||||
if not pkgs:
|
||||
e,m,u = my.pkgSack.matchPackageNames([pkgspec])
|
||||
pkgs = e + m
|
||||
updates = my.doPackageLists(pkgnarrow='updates').updates
|
||||
except Exception, e:
|
||||
module.fail_json(msg="Failure talking to dnf: %s" % e)
|
||||
|
||||
for pkg in pkgs:
|
||||
if pkg in updates:
|
||||
retpkgs.append(pkg)
|
||||
|
||||
return set([ po_to_nevra(p) for p in retpkgs ])
|
||||
|
||||
else:
|
||||
myrepoq = list(repoq)
|
||||
for repoid in dis_repos:
|
||||
r_cmd = ['--disablerepo', repoid]
|
||||
myrepoq.extend(r_cmd)
|
||||
|
||||
for repoid in en_repos:
|
||||
r_cmd = ['--enablerepo', repoid]
|
||||
myrepoq.extend(r_cmd)
|
||||
|
||||
cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
|
||||
rc,out,err = module.run_command(cmd)
|
||||
|
||||
if rc == 0:
|
||||
return set([ p for p in out.split('\n') if p.strip() ])
|
||||
else:
|
||||
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
|
||||
|
||||
return []
|
||||
|
||||
def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
|
||||
|
||||
if not repoq:
|
||||
|
||||
pkgs = []
|
||||
try:
|
||||
my = dnf_base(conf_file)
|
||||
for rid in en_repos:
|
||||
my.repos.enableRepo(rid)
|
||||
for rid in dis_repos:
|
||||
my.repos.disableRepo(rid)
|
||||
|
||||
pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec)
|
||||
if not pkgs:
|
||||
e,m,u = my.pkgSack.matchPackageNames([req_spec])
|
||||
pkgs.extend(e)
|
||||
pkgs.extend(m)
|
||||
e,m,u = my.rpmdb.matchPackageNames([req_spec])
|
||||
pkgs.extend(e)
|
||||
pkgs.extend(m)
|
||||
except Exception, e:
|
||||
module.fail_json(msg="Failure talking to dnf: %s" % e)
|
||||
|
||||
return set([ po_to_nevra(p) for p in pkgs ])
|
||||
|
||||
else:
|
||||
myrepoq = list(repoq)
|
||||
for repoid in dis_repos:
|
||||
r_cmd = ['--disablerepo', repoid]
|
||||
myrepoq.extend(r_cmd)
|
||||
|
||||
for repoid in en_repos:
|
||||
r_cmd = ['--enablerepo', repoid]
|
||||
myrepoq.extend(r_cmd)
|
||||
|
||||
cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec]
|
||||
rc,out,err = module.run_command(cmd)
|
||||
cmd = myrepoq + ["--qf", qf, req_spec]
|
||||
rc2,out2,err2 = module.run_command(cmd)
|
||||
if rc == 0 and rc2 == 0:
|
||||
out += out2
|
||||
pkgs = set([ p for p in out.split('\n') if p.strip() ])
|
||||
if not pkgs:
|
||||
pkgs = is_installed(module, repoq, req_spec, conf_file, qf=qf)
|
||||
return pkgs
|
||||
else:
|
||||
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
|
||||
|
||||
return []
|
||||
|
||||
def transaction_exists(pkglist):
|
||||
"""
|
||||
checks the package list to see if any packages are
|
||||
involved in an incomplete transaction
|
||||
"""
|
||||
|
||||
conflicts = []
|
||||
if not transaction_helpers:
|
||||
return conflicts
|
||||
|
||||
# first, we create a list of the package 'nvreas'
|
||||
# so we can compare the pieces later more easily
|
||||
pkglist_nvreas = []
|
||||
for pkg in pkglist:
|
||||
pkglist_nvreas.append(splitFilename(pkg))
|
||||
|
||||
# next, we build the list of packages that are
|
||||
# contained within an unfinished transaction
|
||||
unfinished_transactions = find_unfinished_transactions()
|
||||
for trans in unfinished_transactions:
|
||||
steps = find_ts_remaining(trans)
|
||||
for step in steps:
|
||||
# the action is install/erase/etc., but we only
|
||||
# care about the package spec contained in the step
|
||||
(action, step_spec) = step
|
||||
(n,v,r,e,a) = splitFilename(step_spec)
|
||||
# and see if that spec is in the list of packages
|
||||
# requested for installation/updating
|
||||
for pkg in pkglist_nvreas:
|
||||
# if the name and arch match, we're going to assume
|
||||
# this package is part of a pending transaction
|
||||
# the label is just for display purposes
|
||||
label = "%s-%s" % (n,a)
|
||||
if n == pkg[0] and a == pkg[4]:
|
||||
if label not in conflicts:
|
||||
conflicts.append("%s-%s" % (n,a))
|
||||
break
|
||||
return conflicts
|
||||
|
||||
def local_nvra(module, path):
|
||||
"""return nvra of a local rpm passed in"""
|
||||
|
||||
cmd = ['/bin/rpm', '-qp' ,'--qf',
|
||||
'%{name}-%{version}-%{release}.%{arch}\n', path ]
|
||||
rc, out, err = module.run_command(cmd)
|
||||
if rc != 0:
|
||||
return None
|
||||
nvra = out.split('\n')[0]
|
||||
return nvra
|
||||
|
||||
def pkg_to_dict(pkgstr):
|
||||
|
||||
if pkgstr.strip():
|
||||
n,e,v,r,a,repo = pkgstr.split('|')
|
||||
else:
|
||||
return {'error_parsing': pkgstr}
|
||||
|
||||
d = {
|
||||
'name':n,
|
||||
'arch':a,
|
||||
'epoch':e,
|
||||
'release':r,
|
||||
'version':v,
|
||||
'repo':repo,
|
||||
'nevra': '%s:%s-%s-%s.%s' % (e,n,v,r,a)
|
||||
}
|
||||
|
||||
if repo == 'installed':
|
||||
d['dnfstate'] = 'installed'
|
||||
else:
|
||||
d['dnfstate'] = 'available'
|
||||
|
||||
return d
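pkg_to_dict() expects the pipe-separated record produced by the repoquery format string used in list_stuff() below (name|epoch|version|release|arch|repoid). A small worked example of the mapping, with an illustrative package string:

    print pkg_to_dict('httpd|0|2.4.10|1.fc21|x86_64|installed')
    # -> {'name': 'httpd', 'arch': 'x86_64', 'epoch': '0', 'release': '1.fc21',
    #     'version': '2.4.10', 'repo': 'installed',
    #     'nevra': '0:httpd-2.4.10-1.fc21.x86_64', 'dnfstate': 'installed'}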
|
||||
|
||||
def repolist(module, repoq, qf="%{repoid}"):
|
||||
|
||||
cmd = repoq + ["--qf", qf, "-a"]
|
||||
rc,out,err = module.run_command(cmd)
|
||||
ret = []
|
||||
if rc == 0:
|
||||
ret = set([ p for p in out.split('\n') if p.strip() ])
|
||||
return ret
|
||||
|
||||
def list_stuff(module, conf_file, stuff):
|
||||
|
||||
qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
|
||||
repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q']
|
||||
if conf_file and os.path.exists(conf_file):
|
||||
repoq += ['-c', conf_file]
|
||||
|
||||
if stuff == 'installed':
|
||||
return [ pkg_to_dict(p) for p in is_installed(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
|
||||
elif stuff == 'updates':
|
||||
return [ pkg_to_dict(p) for p in is_update(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
|
||||
elif stuff == 'available':
|
||||
return [ pkg_to_dict(p) for p in is_available(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
|
||||
elif stuff == 'repos':
|
||||
return [ dict(repoid=name, state='enabled') for name in repolist(module, repoq) if name.strip() ]
|
||||
else:
|
||||
return [ pkg_to_dict(p) for p in is_installed(module, repoq, stuff, conf_file, qf=qf) + is_available(module, repoq, stuff, conf_file, qf=qf) if p.strip() ]
|
||||
|
||||
def install(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos):

    res = {}
    res['results'] = []
    res['msg'] = ''
    res['rc'] = 0
    res['changed'] = False

    for spec in items:
        pkg = None

        # check if pkgspec is installed (if possible for idempotence)
        # localpkg
        if spec.endswith('.rpm') and '://' not in spec:
            # get the pkg name-v-r.arch
            if not os.path.exists(spec):
                res['msg'] += "No Package file matching '%s' found on system" % spec
                module.fail_json(**res)

            nvra = local_nvra(module, spec)
            # look for them in the rpmdb
            if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                # if they are there, skip it
                continue
            pkg = spec

        # URL
        elif '://' in spec:
            pkg = spec

        #groups :(
        elif spec.startswith('@'):
            # complete wild ass guess b/c it's a group
            pkg = spec

        # range requires or file-requires or pkgname :(
        else:
            # most common case is the pkg is already installed and done
            # short circuit all the bs - and search for it as a pkg in is_installed
            # if you find it then we're done
            if not set(['*','?']).intersection(set(spec)):
                pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True)
                if pkgs:
                    res['results'].append('%s providing %s is already installed' % (pkgs[0], spec))
                    continue

            # look up what pkgs provide this
            pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos)
            if not pkglist:
                res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
                module.fail_json(**res)

            # if any of the packages are involved in a transaction, fail now
            # so that we don't hang on the dnf operation later
            conflicts = transaction_exists(pkglist)
            if len(conflicts) > 0:
                res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
                module.fail_json(**res)

            # if any of them are installed
            # then nothing to do

            found = False
            for this in pkglist:
                if is_installed(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True):
                    found = True
                    res['results'].append('%s providing %s is already installed' % (this, spec))
                    break

            # if the version of the pkg you have installed is not in ANY repo, but there are
            # other versions in the repos (both higher and lower) then the previous checks won't work.
            # so we check one more time. This really only works for pkgname - not for file provides or virt provides
            # but virt provides should be all caught in what_provides on its own.
            # highly irritating
            if not found:
                if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                    found = True
                    res['results'].append('package providing %s is already installed' % (spec))

            if found:
                continue

            # if not - then pass in the spec as what to install
            # we could get here if nothing provides it but that's not
            # the error we're catching here
            pkg = spec

        cmd = dnf_basecmd + ['install', pkg]

        if module.check_mode:
            module.exit_json(changed=True)

        changed = True

        rc, out, err = module.run_command(cmd)

        # Fail on invalid urls:
        if (rc == 1 and '://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
            err = 'Package at %s could not be installed' % spec
            module.fail_json(changed=False,msg=err,rc=1)
        elif (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out:
            # avoid failing in the 'Nothing To Do' case
            # this may happen with an URL spec.
            # for an already installed group,
            # we get rc = 0 and 'Nothing to do' in out, not in err.
            rc = 0
            err = ''
            out = '%s: Nothing to do' % spec
            changed = False

        res['rc'] += rc
        res['results'].append(out)
        res['msg'] += err

        # FIXME - if we did an install - go and check the rpmdb to see if it actually installed
        # look for the pkg in rpmdb
        # look for the pkg via obsoletes

        # accumulate any changes
        res['changed'] |= changed

    module.exit_json(**res)

def remove(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos):

    res = {}
    res['results'] = []
    res['msg'] = ''
    res['changed'] = False
    res['rc'] = 0

    for pkg in items:
        is_group = False
        # group remove - this is doom on a stick
        if pkg.startswith('@'):
            is_group = True
        else:
            if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                res['results'].append('%s is not installed' % pkg)
                continue

        # run an actual dnf transaction
        cmd = dnf_basecmd + ["remove", pkg]

        if module.check_mode:
            module.exit_json(changed=True)

        rc, out, err = module.run_command(cmd)

        res['rc'] += rc
        res['results'].append(out)
        res['msg'] += err

        # compile the results into one batch. If anything is changed
        # then mark changed
        # at the end - if we've end up failed then fail out of the rest
        # of the process

        # at this point we should check to see if the pkg is no longer present

        if not is_group: # we can't sensibly check for a group being uninstalled reliably
            # look to see if the pkg shows up from is_installed. If it doesn't
            if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                res['changed'] = True
            else:
                module.fail_json(**res)

        if rc != 0:
            module.fail_json(**res)

    module.exit_json(**res)

def latest(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos):

    res = {}
    res['results'] = []
    res['msg'] = ''
    res['changed'] = False
    res['rc'] = 0

    for spec in items:

        pkg = None
        basecmd = 'update'
        cmd = ''
        # groups, again
        if spec.startswith('@'):
            pkg = spec

        elif spec == '*': #update all
            # use check-update to see if there is any need
            rc,out,err = module.run_command(dnf_basecmd + ['check-update'])
            if rc == 100:
                cmd = dnf_basecmd + [basecmd]
            else:
                res['results'].append('All packages up to date')
                continue

        # dep/pkgname - find it
        else:
            if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                basecmd = 'update'
            else:
                basecmd = 'install'

            pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos)
            if not pkglist:
                res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
                module.fail_json(**res)

            nothing_to_do = True
            for this in pkglist:
                if basecmd == 'install' and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                    nothing_to_do = False
                    break

                if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=en_repos):
                    nothing_to_do = False
                    break

            if nothing_to_do:
                res['results'].append("All packages providing %s are up to date" % spec)
                continue

            # if any of the packages are involved in a transaction, fail now
            # so that we don't hang on the dnf operation later
            conflicts = transaction_exists(pkglist)
            if len(conflicts) > 0:
                res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
                module.fail_json(**res)

        pkg = spec
        if not cmd:
            cmd = dnf_basecmd + [basecmd, pkg]

        if module.check_mode:
            return module.exit_json(changed=True)

        rc, out, err = module.run_command(cmd)

        res['rc'] += rc
        res['results'].append(out)
        res['msg'] += err

        # FIXME if it is - update it and check to see if it applied
        # check to see if there is no longer an update available for the pkgspec

        if rc:
            res['failed'] = True
        else:
            res['changed'] = True

    module.exit_json(**res)

def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo,
           disable_gpg_check):

    # take multiple args comma separated
    items = pkgspec.split(',')

    # need debug level 2 to get 'Nothing to do' for groupinstall.
    dnf_basecmd = [dnfbin, '-d', '2', '-y']

    if not repoquery:
        repoq = None
    else:
        repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q']

    if conf_file and os.path.exists(conf_file):
        dnf_basecmd += ['-c', conf_file]
        if repoq:
            repoq += ['-c', conf_file]

    dis_repos = []
    en_repos = []
    if disablerepo:
        dis_repos = disablerepo.split(',')
    if enablerepo:
        en_repos = enablerepo.split(',')

    for repoid in dis_repos:
        r_cmd = ['--disablerepo=%s' % repoid]
        dnf_basecmd.extend(r_cmd)

    for repoid in en_repos:
        r_cmd = ['--enablerepo=%s' % repoid]
        dnf_basecmd.extend(r_cmd)

    if state in ['installed', 'present', 'latest']:
        my = dnf_base(conf_file)
        try:
            for r in dis_repos:
                my.repos.disableRepo(r)

            current_repos = dnf.yum.config.RepoConf()
            for r in en_repos:
                try:
                    my.repos.enableRepo(r)
                    new_repos = my.repos.repos.keys()
                    for i in new_repos:
                        if not i in current_repos:
                            rid = my.repos.getRepo(i)
                            a = rid.repoXML.repoid
                    current_repos = new_repos
                except dnf.exceptions.Error, e:
                    module.fail_json(msg="Error setting/accessing repo %s: %s" % (r, e))
        except dnf.exceptions.Error, e:
            module.fail_json(msg="Error accessing repos: %s" % e)

    if state in ['installed', 'present']:
        if disable_gpg_check:
            dnf_basecmd.append('--nogpgcheck')
        install(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos)
    elif state in ['removed', 'absent']:
        remove(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos)
    elif state == 'latest':
        if disable_gpg_check:
            dnf_basecmd.append('--nogpgcheck')
        latest(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos)

    # should be caught by AnsibleModule argument_spec
    return dict(changed=False, failed=True, results='', errors='unexpected state')

def main():

    # state=installed name=pkgspec
    # state=removed name=pkgspec
    # state=latest name=pkgspec
    #
    # informational commands:
    #   list=installed
    #   list=updates
    #   list=available
    #   list=repos
    #   list=pkgspec

    module = AnsibleModule(
        argument_spec = dict(
            name=dict(aliases=['pkg']),
            # removed==absent, installed==present, these are accepted as aliases
            state=dict(default='installed', choices=['absent','present','installed','removed','latest']),
            enablerepo=dict(),
            disablerepo=dict(),
            list=dict(),
            conf_file=dict(default=None),
            disable_gpg_check=dict(required=False, default="no", type='bool'),
            # this should not be needed, but exists as a failsafe
            install_repoquery=dict(required=False, default="yes", type='bool'),
        ),
        required_one_of = [['name','list']],
        mutually_exclusive = [['name','list']],
        supports_check_mode = True
    )

    # this should not be needed, but exists as a failsafe
    params = module.params
    if params['install_repoquery'] and not repoquery and not module.check_mode:
        install_dnf_utils(module)

    if params['list']:
        if not repoquery:
            module.fail_json(msg="repoquery is required to use list= with this module. Please install the dnf-utils package.")
        results = dict(results=list_stuff(module, params['conf_file'], params['list']))
        module.exit_json(**results)

    else:
        pkg = params['name']
        state = params['state']
        enablerepo = params.get('enablerepo', '')
        disablerepo = params.get('disablerepo', '')
        disable_gpg_check = params['disable_gpg_check']
        res = ensure(module, state, pkg, params['conf_file'], enablerepo,
                     disablerepo, disable_gpg_check)
        module.fail_json(msg="we should never get here unless this all failed", **res)

# import module snippets
from ansible.module_utils.basic import *
main()

0  packaging/language/__init__.py  Normal file

@@ -101,7 +101,10 @@ def parse_out(string):
     return re.sub("\s+", " ", string).strip()
 
 def has_changed(string):
-    return (re.match("Nothing to install or update", string) != None)
+    if "Nothing to install or update" in string:
+        return False
+    else:
+        return True
 
 def composer_install(module, command, options):
     php_path = module.get_bin_path("php", True, ["/usr/local/bin"])

@@ -29,7 +29,7 @@ version_added: "1.6"
 options:
   name:
     description:
-      - The name of the Perl library to install
+      - The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz
     required: false
     default: null
     aliases: ["pkg"]

@@ -56,6 +56,8 @@ options:
 examples:
    - code: "cpanm: name=Dancer"
      description: Install I(Dancer) perl package.
+   - code: "cpanm: name=MIYAGAWA/Plack-0.99_05.tar.gz"
+     description: Install version 0.99_05 of the I(Plack) perl package.
    - code: "cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib"
      description: "Install I(Dancer) (U(http://perldancer.org/)) into the specified I(locallib)"
    - code: "cpanm: from_path=/srv/webapps/my_app/src/"

0  packaging/os/__init__.py  Normal file

@@ -67,7 +67,7 @@ def query_package(module, opkg_path, name, state="present"):
 
     if state == "present":
 
-        rc, out, err = module.run_command("%s list-installed | grep -q ^%s" % (pipes.quote(opkg_path), pipes.quote(name)), use_unsafe_shell=True)
+        rc, out, err = module.run_command("%s list-installed | grep -q \"^%s \"" % (pipes.quote(opkg_path), pipes.quote(name)), use_unsafe_shell=True)
         if rc == 0:
             return True
 

@@ -33,9 +33,9 @@ DOCUMENTATION = '''
 module: zypper
 author: Patrick Callahan
 version_added: "1.2"
-short_description: Manage packages on SuSE and openSuSE
+short_description: Manage packages on SUSE and openSUSE
 description:
-    - Manage packages on SuSE and openSuSE using the zypper and rpm tools.
+    - Manage packages on SUSE and openSUSE using the zypper and rpm tools.
 options:
   name:
     description:

@@ -84,6 +84,16 @@ EXAMPLES = '''
 - zypper: name=nmap state=absent
 '''
 
+# Function used for getting zypper version
+def zypper_version(module):
+    """Return (rc, message) tuple"""
+    cmd = ['/usr/bin/zypper', '-V']
+    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+    if rc == 0:
+        return rc, stdout
+    else:
+        return rc, stderr
+
 # Function used for getting versions of currently installed packages.
 def get_current_version(m, name):
     cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n']

@@ -130,7 +140,7 @@ def get_package_state(m, packages):
     return installed_state
 
 # Function used to make sure a package is present.
-def package_present(m, name, installed_state, disable_gpg_check, disable_recommends):
+def package_present(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper):
     packages = []
     for package in name:
         if installed_state[package] is False:

@@ -138,12 +148,14 @@ def package_present(m, name, installed_state, disable_gpg_check, disable_recommends):
     if len(packages) != 0:
         cmd = ['/usr/bin/zypper', '--non-interactive']
         # add global options before zypper command
-        if disable_gpg_check:
+        if disable_gpg_check and not old_zypper:
             cmd.append('--no-gpg-check')
+        else:
+            cmd.append('--no-gpg-checks')
 
         cmd.extend(['install', '--auto-agree-with-licenses'])
         # add install parameter
-        if disable_recommends:
+        if disable_recommends and not old_zypper:
             cmd.append('--no-recommends')
         cmd.extend(packages)
         rc, stdout, stderr = m.run_command(cmd, check_rc=False)

@@ -161,16 +173,19 @@ def package_present(m, name, installed_state, disable_gpg_check, disable_recommends):
     return (rc, stdout, stderr, changed)
 
 # Function used to make sure a package is the latest available version.
-def package_latest(m, name, installed_state, disable_gpg_check, disable_recommends):
+def package_latest(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper):
 
     # first of all, make sure all the packages are installed
-    (rc, stdout, stderr, changed) = package_present(m, name, installed_state, disable_gpg_check)
+    (rc, stdout, stderr, changed) = package_present(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper)
 
     # if we've already made a change, we don't have to check whether a version changed
     if not changed:
         pre_upgrade_versions = get_current_version(m, name)
 
-        cmd = ['/usr/bin/zypper', '--non-interactive', 'update', '--auto-agree-with-licenses']
+        if old_zypper:
+            cmd = ['/usr/bin/zypper', '--non-interactive', 'install', '--auto-agree-with-licenses']
+        else:
+            cmd = ['/usr/bin/zypper', '--non-interactive', 'update', '--auto-agree-with-licenses']
         cmd.extend(name)
         rc, stdout, stderr = m.run_command(cmd, check_rc=False)
 

@@ -183,7 +198,7 @@ def package_latest(m, name, installed_state, disable_gpg_check, disable_recommends):
     return (rc, stdout, stderr, changed)
 
 # Function used to make sure a package is not installed.
-def package_absent(m, name, installed_state):
+def package_absent(m, name, installed_state, old_zypper):
     packages = []
     for package in name:
         if installed_state[package] is True:

@@ -234,16 +249,23 @@ def main():
     result['name'] = name
     result['state'] = state
 
+    rc, out = zypper_version(module)
+    match = re.match(r'zypper\s+(\d+)\.(\d+)\.(\d+)', out)
+    if not match or int(match.group(1)) > 0:
+        old_zypper = False
+    else:
+        old_zypper = True
+
     # Get package state
     installed_state = get_package_state(module, name)
 
     # Perform requested action
     if state in ['installed', 'present']:
-        (rc, stdout, stderr, changed) = package_present(module, name, installed_state, disable_gpg_check, disable_recommends)
+        (rc, stdout, stderr, changed) = package_present(module, name, installed_state, disable_gpg_check, disable_recommends, old_zypper)
     elif state in ['absent', 'removed']:
-        (rc, stdout, stderr, changed) = package_absent(module, name, installed_state)
+        (rc, stdout, stderr, changed) = package_absent(module, name, installed_state, old_zypper)
     elif state == 'latest':
-        (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, disable_gpg_check, disable_recommends)
+        (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, disable_gpg_check, disable_recommends, old_zypper)
 
     if rc != 0:
         if stderr:

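The version probe added above treats any zypper whose major version is greater than 0 as modern; only 0.x releases fall back to the old_zypper code paths. A small self-contained sketch of how that regex classifies two hypothetical 'zypper -V' outputs:

import re

def is_old_zypper(version_output):
    # Same pattern the new main() code applies to the output of 'zypper -V'.
    match = re.match(r'zypper\s+(\d+)\.(\d+)\.(\d+)', version_output)
    if not match or int(match.group(1)) > 0:
        return False
    return True

# Hypothetical version strings:
print(is_old_zypper("zypper 1.6.316"))   # False -> current option names
print(is_old_zypper("zypper 0.6.201"))   # True  -> old 'install'/'sa'/'sd' style commands
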
@@ -72,16 +72,24 @@ EXAMPLES = '''
 # Add python development repository
 - zypper_repository: repo=http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo
 '''
-from xml.dom.minidom import parseString as parseXML
 
 REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
 
+def zypper_version(module):
+    """Return (rc, message) tuple"""
+    cmd = ['/usr/bin/zypper', '-V']
+    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+    if rc == 0:
+        return rc, stdout
+    else:
+        return rc, stderr
+
 def _parse_repos(module):
     """parses the output of zypper -x lr and returns a parse repo dictionary"""
     cmd = ['/usr/bin/zypper', '-x', 'lr']
     repos = []
+
+    from xml.dom.minidom import parseString as parseXML
     rc, stdout, stderr = module.run_command(cmd, check_rc=True)
     dom = parseXML(stdout)
     repo_list = dom.getElementsByTagName('repo')

@@ -95,8 +103,25 @@ def _parse_repos(module):
 
     return repos
 
+def _parse_repos_old(module):
+    """parses the output of zypper sl and returns a parse repo dictionary"""
+    cmd = ['/usr/bin/zypper', 'sl']
+    repos = []
+    rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+    for line in stdout.split('\n'):
+        matched = re.search(r'\d+\s+\|\s+(?P<enabled>\w+)\s+\|\s+(?P<autorefresh>\w+)\s+\|\s+(?P<type>\w+)\s+\|\s+(?P<name>\w+)\s+\|\s+(?P<url>.*)', line)
+        if matched == None:
+            continue
 
-def repo_exists(module, **kwargs):
+        m = matched.groupdict()
+        m['alias']= m['name']
+        m['priority'] = 100
+        m['gpgcheck'] = 1
+        repos.append(m)
+
+    return repos
+
+def repo_exists(module, old_zypper, **kwargs):
 
     def repo_subset(realrepo, repocmp):
         for k in repocmp:

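The 'zypper sl' fallback added above scrapes a pipe-separated table instead of XML. A standalone sketch of that regex against one invented table row, showing the fields _parse_repos_old() recovers (the real 'zypper sl' layout may differ slightly):

import re

# Same pattern _parse_repos_old() applies to each line of 'zypper sl' output.
OLD_REPO_LINE = re.compile(
    r'\d+\s+\|\s+(?P<enabled>\w+)\s+\|\s+(?P<autorefresh>\w+)\s+'
    r'\|\s+(?P<type>\w+)\s+\|\s+(?P<name>\w+)\s+\|\s+(?P<url>.*)')

row = "1 | Yes | Yes | YUM | repo_oss | http://example.com/repo/oss/"   # invented row
matched = OLD_REPO_LINE.search(row)
if matched is not None:
    print(matched.groupdict())
    # -> {'enabled': 'Yes', 'autorefresh': 'Yes', 'type': 'YUM',
    #     'name': 'repo_oss', 'url': 'http://example.com/repo/oss/'}
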
@@ -109,7 +134,10 @@ def repo_exists(module, **kwargs):
                 return False
         return True
 
-    repos = _parse_repos(module)
+    if old_zypper:
+        repos = _parse_repos_old(module)
+    else:
+        repos = _parse_repos(module)
 
     for repo in repos:
         if repo_subset(repo, kwargs):

@@ -117,13 +145,21 @@ def repo_exists(module, **kwargs):
     return False
 
 
-def add_repo(module, repo, alias, description, disable_gpg_check):
-    cmd = ['/usr/bin/zypper', 'ar', '--check', '--refresh']
+def add_repo(module, repo, alias, description, disable_gpg_check, old_zypper):
+    if old_zypper:
+        cmd = ['/usr/bin/zypper', 'sa']
+    else:
+        cmd = ['/usr/bin/zypper', 'ar', '--check', '--refresh']
+
+    if repo.startswith("file:/") and old_zypper:
+        cmd.extend(['-t', 'Plaindir'])
+    else:
+        cmd.extend(['-t', 'plaindir'])
 
     if description:
         cmd.extend(['--name', description])
 
-    if disable_gpg_check:
+    if disable_gpg_check and not old_zypper:
         cmd.append('--no-gpgcheck')
 
     cmd.append(repo)

@@ -138,14 +174,21 @@ def add_repo(module, repo, alias, description, disable_gpg_check):
     elif 'already exists. Please use another alias' in stderr:
         changed = False
     else:
-        module.fail_json(msg=stderr if stderr else stdout)
+        #module.fail_json(msg=stderr if stderr else stdout)
+        if stderr:
+            module.fail_json(msg=stderr)
+        else:
+            module.fail_json(msg=stdout)
 
     return changed
 
 
-def remove_repo(module, repo, alias):
+def remove_repo(module, repo, alias, old_zypper):
 
-    cmd = ['/usr/bin/zypper', 'rr']
+    if old_zypper:
+        cmd = ['/usr/bin/zypper', 'sd']
+    else:
+        cmd = ['/usr/bin/zypper', 'rr']
     if alias:
         cmd.append(alias)
     else:

@@ -158,7 +201,11 @@ def remove_repo(module, repo, alias):
 
 def fail_if_rc_is_null(module, rc, stdout, stderr):
     if rc != 0:
-        module.fail_json(msg=stderr if stderr else stdout)
+        #module.fail_json(msg=stderr if stderr else stdout)
+        if stderr:
+            module.fail_json(msg=stderr)
+        else:
+            module.fail_json(msg=stdout)
 
 
 def main():

@@ -182,6 +229,13 @@ def main():
     def exit_unchanged():
         module.exit_json(changed=False, repo=repo, state=state, name=name)
 
+    rc, out = zypper_version(module)
+    match = re.match(r'zypper\s+(\d+)\.(\d+)\.(\d+)', out)
+    if not match or int(match.group(1)) > 0:
+        old_zypper = False
+    else:
+        old_zypper = True
+
     # Check run-time module parameters
     if state == 'present' and not repo:
         module.fail_json(msg='Module option state=present requires repo')

@@ -196,22 +250,22 @@ def main():
         module.fail_json(msg='Name required when adding non-repo files:')
 
     if repo and repo.endswith('.repo'):
-        exists = repo_exists(module, url=repo, alias=name)
+        exists = repo_exists(module, old_zypper, url=repo, alias=name)
     elif repo:
-        exists = repo_exists(module, url=repo)
+        exists = repo_exists(module, old_zypper, url=repo)
     else:
-        exists = repo_exists(module, alias=name)
+        exists = repo_exists(module, old_zypper, alias=name)
 
     if state == 'present':
         if exists:
             exit_unchanged()
 
-        changed = add_repo(module, repo, name, description, disable_gpg_check)
+        changed = add_repo(module, repo, name, description, disable_gpg_check, old_zypper)
     elif state == 'absent':
         if not exists:
             exit_unchanged()
 
-        changed = remove_repo(module, repo, name)
+        changed = remove_repo(module, repo, name, old_zypper)
 
     module.exit_json(changed=changed, repo=repo, state=state)

0  source_control/__init__.py  Normal file

@@ -57,6 +57,12 @@ options:
     required: false
     default: 'yes'
     choices: ['yes', 'no']
+  content_type:
+    description:
+      - Content type to use for requests made to the webhook
+    required: false
+    default: 'json'
+    choices: ['json', 'form']
 
 author: Phillip Gentry, CX Inc
 '''

@@ -69,7 +75,7 @@ EXAMPLES = '''
 - local_action: github_hooks action=cleanall user={{ gituser }} oauthkey={{ oauthkey }} repo={{ repo }}
 '''
 
-def list(module, hookurl, oauthkey, repo, user):
+def _list(module, hookurl, oauthkey, repo, user):
     url = "%s/hooks" % repo
     auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
     headers = {

@@ -81,38 +87,38 @@ def list(module, hookurl, oauthkey, repo, user):
     else:
         return False, response.read()
 
-def clean504(module, hookurl, oauthkey, repo, user):
-    current_hooks = list(hookurl, oauthkey, repo, user)[1]
+def _clean504(module, hookurl, oauthkey, repo, user):
+    current_hooks = _list(hookurl, oauthkey, repo, user)[1]
     decoded = json.loads(current_hooks)
 
     for hook in decoded:
         if hook['last_response']['code'] == 504:
            # print "Last response was an ERROR for hook:"
            # print hook['id']
-           delete(module, hookurl, oauthkey, repo, user, hook['id'])
+           _delete(module, hookurl, oauthkey, repo, user, hook['id'])
 
     return 0, current_hooks
 
-def cleanall(module, hookurl, oauthkey, repo, user):
-    current_hooks = list(hookurl, oauthkey, repo, user)[1]
+def _cleanall(module, hookurl, oauthkey, repo, user):
+    current_hooks = _list(hookurl, oauthkey, repo, user)[1]
     decoded = json.loads(current_hooks)
 
     for hook in decoded:
         if hook['last_response']['code'] != 200:
            # print "Last response was an ERROR for hook:"
            # print hook['id']
-           delete(module, hookurl, oauthkey, repo, user, hook['id'])
+           _delete(module, hookurl, oauthkey, repo, user, hook['id'])
 
     return 0, current_hooks
 
-def create(module, hookurl, oauthkey, repo, user):
+def _create(module, hookurl, oauthkey, repo, user, content_type):
     url = "%s/hooks" % repo
     values = {
         "active": True,
         "name": "web",
         "config": {
            "url": "%s" % hookurl,
-           "content_type": "json"
+           "content_type": "%s" % content_type
            }
        }
     data = json.dumps(values)

@@ -126,7 +132,7 @@ def create(module, hookurl, oauthkey, repo, user):
     else:
         return 0, response.read()
 
-def delete(module, hookurl, oauthkey, repo, user, hookid):
+def _delete(module, hookurl, oauthkey, repo, user, hookid):
     url = "%s/hooks/%s" % (repo, hookid)
     auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
     headers = {

@@ -144,6 +150,7 @@ def main():
         repo=dict(required=True),
         user=dict(required=True),
         validate_certs=dict(default='yes', type='bool'),
+        content_type=dict(default='json', choices=['json', 'form']),
         )
     )
 

@@ -152,18 +159,19 @@ def main():
     oauthkey = module.params['oauthkey']
     repo = module.params['repo']
     user = module.params['user']
+    content_type = module.params['content_type']
 
     if action == "list":
-        (rc, out) = list(module, hookurl, oauthkey, repo, user)
+        (rc, out) = _list(module, hookurl, oauthkey, repo, user)
 
     if action == "clean504":
-        (rc, out) = clean504(module, hookurl, oauthkey, repo, user)
+        (rc, out) = _clean504(module, hookurl, oauthkey, repo, user)
 
     if action == "cleanall":
-        (rc, out) = cleanall(module, hookurl, oauthkey, repo, user)
+        (rc, out) = _cleanall(module, hookurl, oauthkey, repo, user)
 
     if action == "create":
-        (rc, out) = create(module, hookurl, oauthkey, repo, user)
+        (rc, out) = _create(module, hookurl, oauthkey, repo, user, content_type)
 
     if rc != 0:
         module.fail_json(msg="failed", result=out)

@@ -62,7 +62,8 @@ def main():
             name = dict(required=True),
             path = dict(required=True),
             link = dict(required=False),
-        )
+        ),
+        supports_check_mode=True,
     )
 
     params = module.params

@@ -74,13 +75,14 @@ def main():
 
     current_path = None
     all_alternatives = []
+    os_family = None
 
     (rc, query_output, query_error) = module.run_command(
         [UPDATE_ALTERNATIVES, '--query', name]
     )
 
     # Gather the current setting and all alternatives from the query output.
-    # Query output should look something like this:
+    # Query output should look something like this on Debian systems:
 
     # Name: java
     # Link: /usr/bin/java

@@ -101,6 +103,7 @@ def main():
     # java.1.gz /usr/lib/jvm/java-7-openjdk-amd64/jre/man/man1/java.1.gz
 
     if rc == 0:
+        os_family = "Debian"
         for line in query_output.splitlines():
             split_line = line.split(':')
             if len(split_line) == 2:

@@ -112,11 +115,27 @@ def main():
                 all_alternatives.append(value)
             elif key == 'Link' and not link:
                 link = value
+    elif rc == 2:
+        os_family = "RedHat"
+        # This is the version of update-alternatives that is shipped with
+        # chkconfig on RedHat-based systems. Try again with the right options.
+        (rc, query_output, query_error) = module.run_command(
+            [UPDATE_ALTERNATIVES, '--list']
+        )
+        for line in query_output.splitlines():
+            line_name, line_mode, line_path = line.strip().split("\t")
+            if line_name != name:
+                continue
+            current_path = line_path
+            break
 
     if current_path != path:
         if module.check_mode:
             module.exit_json(changed=True, current_path=current_path)
         try:
             # install the requested path if necessary
-            if path not in all_alternatives:
+            # (unsupported on the RedHat version)
+            if path not in all_alternatives and os_family == "Debian":
                 module.run_command(
                     [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)],
                     check_rc=True

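The RedHat branch added above assumes 'update-alternatives --list' prints one tab-separated name/mode/path triple per line. A tiny sketch of that split against an invented output line:

# Invented sample line; the split mirrors the RedHat branch above.
line = "java\tauto\t/usr/lib/jvm/jre-1.7.0-openjdk.x86_64/bin/java"
line_name, line_mode, line_path = line.strip().split("\t")
print("name=%s mode=%s path=%s" % (line_name, line_mode, line_path))
# -> name=java mode=auto path=/usr/lib/jvm/jre-1.7.0-openjdk.x86_64/bin/java
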
332  system/crypttab.py  Normal file

@@ -0,0 +1,332 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2014, Steve <yo@groks.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: crypttab
short_description: Encrypted Linux block devices
description:
  - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
version_added: "1.8"
options:
  name:
    description:
      - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
        optionally prefixed with C(/dev/mapper), as it appears in the filesystem. I(/dev/mapper)
        will be stripped from I(name).
    required: true
    default: null
    aliases: []
  state:
    description:
      - Use I(present) to add a line to C(/etc/crypttab) or update its definition
        if already present. Use I(absent) to remove a line with matching I(name).
        Use I(opts_present) to add options to those already present; options with
        different values will be updated. Use I(opts_absent) to remove options from
        the existing set.
    required: true
    choices: [ "present", "absent", "opts_present", "opts_absent" ]
    default: null
  backing_device:
    description:
      - Path to the underlying block device or file, or the UUID of a block-device
        prefixed with I(UUID=)
    required: false
    default: null
  password:
    description:
      - Encryption password, the path to a file containing the password, or
        'none' or '-' if the password should be entered at boot.
    required: false
    default: "none"
  opts:
    description:
      - A comma-delimited list of options. See C(crypttab(5) ) for details.
    required: false
  path:
    description:
      - Path to file to use instead of C(/etc/crypttab). This might be useful
        in a chroot environment.
    required: false
    default: /etc/crypttab

notes: []
requirements: []
author: Steve <yo@groks.org>
'''

EXAMPLES = '''
- name: Set the options explicitly for a device which must already exist
  crypttab: name=luks-home state=present opts=discard,cipher=aes-cbc-essiv:sha256

- name: Add the 'discard' option to any existing options for all devices
  crypttab: name={{ item.device }} state=opts_present opts=discard
  with_items: ansible_mounts
  when: '/dev/mapper/luks-' in {{ item.device }}
'''

def main():

    module = AnsibleModule(
        argument_spec = dict(
            name           = dict(required=True),
            state          = dict(required=True, choices=['present', 'absent', 'opts_present', 'opts_absent']),
            backing_device = dict(default=None),
            password       = dict(default=None),
            opts           = dict(default=None),
            path           = dict(default='/etc/crypttab')
        ),
        supports_check_mode = True
    )

    name = module.params['name'].lstrip('/dev/mapper')
    backing_device = module.params['backing_device']
    password = module.params['password']
    opts = module.params['opts']
    state = module.params['state']
    path = module.params['path']

    if backing_device is None and password is None and opts is None:
        module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
                         **module.params)

    if 'opts' in state and (backing_device is not None or password is not None):
        module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state,
                         **module.params)

    for arg_name, arg in (('name', name),
                          ('backing_device', backing_device),
                          ('password', password),
                          ('opts', opts)):
        if (arg is not None
                and (' ' in arg or '\t' in arg or arg == '')):
            module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
                             **module.params)

    try:
        crypttab = Crypttab(path)
        existing_line = crypttab.match(name)
    except Exception, e:
        module.fail_json(msg="failed to open and parse crypttab file: %s" % e,
                         **module.params)

    if 'present' in state and existing_line is None and backing_device is None:
        module.fail_json(msg="'backing_device' required to add a new entry",
                         **module.params)

    changed, reason = False, '?'

    if state == 'absent':
        if existing_line is not None:
            changed, reason = existing_line.remove()

    elif state == 'present':
        if existing_line is not None:
            changed, reason = existing_line.set(backing_device, password, opts)
        else:
            changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))

    elif state == 'opts_present':
        if existing_line is not None:
            changed, reason = existing_line.opts.add(opts)
        else:
            changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))

    elif state == 'opts_absent':
        if existing_line is not None:
            changed, reason = existing_line.opts.remove(opts)

    if changed and not module.check_mode:
        with open(path, 'wb') as f:
            f.write(str(crypttab))

    module.exit_json(changed=changed, msg=reason, **module.params)


class Crypttab(object):

    _lines = []

    def __init__(self, path):
        self.path = path
        if not os.path.exists(path):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            open(path, 'a').close()

        with open(path, 'r') as f:
            for line in f.readlines():
                self._lines.append(Line(line))

    def add(self, line):
        self._lines.append(line)
        return True, 'added line'

    def lines(self):
        for line in self._lines:
            if line.valid():
                yield line

    def match(self, name):
        for line in self.lines():
            if line.name == name:
                return line
        return None

    def __str__(self):
        lines = []
        for line in self._lines:
            lines.append(str(line))
        crypttab = '\n'.join(lines)
        if crypttab[-1] != '\n':
            crypttab += '\n'
        return crypttab


class Line(object):

    def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None):
        self.line = line
        self.name = name
        self.backing_device = backing_device
        self.password = password
        self.opts = Options(opts)

        if line is not None:
            if self._line_valid(line):
                self.name, backing_device, password, opts = self._split_line(line)

        self.set(backing_device, password, opts)

    def set(self, backing_device, password, opts):
        changed = False

        if backing_device is not None and self.backing_device != backing_device:
            self.backing_device = backing_device
            changed = True

        if password is not None and self.password != password:
            self.password = password
            changed = True

        if opts is not None:
            opts = Options(opts)
            if opts != self.opts:
                self.opts = opts
                changed = True

        return changed, 'updated line'

    def _line_valid(self, line):
        if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4):
            return False
        return True

    def _split_line(self, line):
        fields = line.split()
        return (fields[0],
                fields[1],
                fields[2] if len(fields) >= 3 else None,
                fields[3] if len(fields) >= 4 else None)

    def remove(self):
        self.line, self.name, self.backing_device = '', None, None
        return True, 'removed line'

    def valid(self):
        if self.name is not None and self.backing_device is not None:
            return True
        return False

    def __str__(self):
        if self.valid():
            fields = [self.name, self.backing_device]
            if self.password is not None or self.opts:
                fields.append(self.password if self.password is not None else 'none')
                if self.opts:
                    fields.append(str(self.opts))
            return ' '.join(fields)
        return self.line


class Options(dict):
    """opts_string looks like: 'discard,foo=bar,baz=greeble' """

    def __init__(self, opts_string):
        super(Options, self).__init__()
        self.itemlist = []
        if opts_string is not None:
            for opt in opts_string.split(','):
                kv = opt.split('=')
                k, v = (kv[0], kv[1]) if len(kv) > 1 else (kv[0], None)
                self[k] = v

    def add(self, opts_string):
        changed = False
        for k, v in Options(opts_string).items():
            if self.has_key(k):
                if self[k] != v:
                    changed = True
            else:
                changed = True
            self[k] = v
        return changed, 'updated options'

    def remove(self, opts_string):
        changed = False
        for k in Options(opts_string):
            if self.has_key(k):
                del self[k]
                changed = True
        return changed, 'removed options'

    def keys(self):
        return self.itemlist

    def values(self):
        return [self[key] for key in self]

    def items(self):
        return [(key, self[key]) for key in self]

    def __iter__(self):
        return iter(self.itemlist)

    def __setitem__(self, key, value):
        if not self.has_key(key):
            self.itemlist.append(key)
        super(Options, self).__setitem__(key, value)

    def __delitem__(self, key):
        self.itemlist.remove(key)
        super(Options, self).__delitem__(key)

    def __ne__(self, obj):
        return not (isinstance(obj, Options)
                    and sorted(self.items()) == sorted(obj.items()))

    def __str__(self):
        return ','.join([k if v is None else '%s=%s' % (k, v)
                         for k, v in self.items()])

# import module snippets
from ansible.module_utils.basic import *
main()

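As the Options docstring above says, an opts string is a comma-separated mix of bare flags and key=value pairs. A simplified standalone sketch of that parsing (not the module's Options class, which additionally preserves insertion order so the crypttab line can be written back unchanged):

def parse_opts(opts_string):
    # 'discard,cipher=aes-cbc-essiv:sha256' -> {'discard': None, 'cipher': 'aes-cbc-essiv:sha256'}
    opts = {}
    for opt in opts_string.split(','):
        kv = opt.split('=', 1)
        opts[kv[0]] = kv[1] if len(kv) > 1 else None
    return opts

print(parse_opts('discard,cipher=aes-cbc-essiv:sha256'))
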
@@ -26,7 +26,7 @@ module: getent
 short_description: a wrapper to the unix getent utility
 description:
      - Runs getent against one of it's various databases and returns information into
-       the host's facts
+       the host's facts, in a getent_<database> prefixed variable
 version_added: "1.8"
 options:
    database:

@@ -51,7 +51,7 @@ options:
     description:
       - If a supplied key is missing this will make the task fail if True
 
-notes:
+notes:
    - "Not all databases support enumeration, check system documentation for details"
 requirements: [ ]
 author: Brian Coca

@@ -60,23 +60,23 @@ author: Brian Coca
 EXAMPLES = '''
 # get root user info
 - getent: database=passwd key=root
-  register: root_info
+- debug: var=getent_passwd
 
 # get all groups
 - getent: database=group split=':'
-  register: groups
+- debug: var=getent_group
 
 # get all hosts, split by tab
 - getent: database=hosts
-  register: hosts
+- debug: var=getent_hosts
 
 # get http service info, no error if missing
 - getent: database=services key=http fail_key=False
-  register: http_info
+- debug: var=getent_services
 
 # get user password hash (requires sudo/root)
 - getent: database=shadow key=www-data split=:
-  register: pw_hash
+- debug: var=getent_shadow
 
 '''

407  system/glusterfs.py  Normal file

@@ -0,0 +1,407 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2014, Taneli Leppä <taneli@crasman.fi>
#
# This file is part of Ansible (sort of)
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.


DOCUMENTATION = """
module: glusterfs
short_description: manage GlusterFS
description:
  - Manage GlusterFS volumes
options:
  action:
    required: true
    choices: [ 'create', 'start', 'stop', 'tune', 'rebalance', 'limit-usage' ]
    description:
      - Mode of operation: create volume, start/stop volume, set tuning options, rebalance or set quota
  name:
    required: true
    description:
      - The volume name
  hosts:
    required: false
    description:
      - List of hosts to use for probing and brick setup
  host:
    required: false
    description:
      - Override local hostname (for peer probing purposes)
  glusterbin:
    required: false
    default: /usr/sbin/gluster
    description:
      - Override gluster cli path
  replica:
    required: false
    description:
      - Replica count for volume
  stripe:
    required: false
    description:
      - Stripe count for volume
  transport:
    required: false
    choices: [ 'tcp', 'rdma', 'tcp,rdma' ]
    description:
      - Transport type for volume
  brick:
    required: false
    description:
      - Brick path on servers
  start:
    required: false
    description:
      - Controls whether the volume is started after creation or not
  rebalance:
    required: false
    description:
      - Controls whether the volume is rebalanced after adding bricks or not
  option:
    required: false
    description:
      - Tuning parameter name when action=tune
  parameter:
    required: false
    description:
      - Tuning parameter value when action=tune
  directory:
    required: false
    description:
      - Directory for limit-usage
  value:
    required: false
    description:
      - Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list)
notes:
  - "Requires cli tools for GlusterFS on servers"
  - "Will add new bricks, but not remove them"
author: Taneli Leppä
"""

EXAMPLES = """
- name: create gluster volume
  glusterfs: action=create name=test1 brick=/bricks/brick1/g1 rebalance=yes
  args:
    hosts: "{{ play_hosts }}"
  run_once: true

- name: tune
  glusterfs: action=tune name=test1 option=performance.cache-size parameter=256MB
  run_once: true

- name: start gluster volume
  glusterfs: action=start name=test1
  run_once: true

- name: limit usage
  glusterfs: action=limit-usage name=test1 directory=/foo value=20.0MB
  run_once: true

- name: stop gluster volume
  glusterfs: action=stop name=test1
  run_once: true
"""

import os
|
||||
import shutil
|
||||
import time
|
||||
import socket
|
||||
import re
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
action=dict(required=True, default=None, choices=[ 'create', 'start', 'stop', 'tune', 'rebalance', 'limit-usage' ]),
|
||||
name=dict(required=True, default=None, aliases=['volume']),
|
||||
hosts=dict(required=False, default=None, type='list'),
|
||||
host=dict(required=False, default=None),
|
||||
stripe=dict(required=False, default=None, type='int'),
|
||||
replica=dict(required=False, default=None, type='int'),
|
||||
transport=dict(required=False, default='tcp', choices=[ 'tcp', 'rdma', 'tcp,rdma' ]),
|
||||
brick=dict(required=False, default=None),
|
||||
start=dict(required=False, default='1'),
|
||||
rebalance=dict(required=False, default='0'),
|
||||
option=dict(required=False, default=None),
|
||||
parameter=dict(required=False, default=None),
|
||||
value=dict(required=False, default=None),
|
||||
directory=dict(required=False, default=None),
|
||||
glusterbin=dict(required=False, default='/usr/sbin/gluster'),
|
||||
)
|
||||
)
|
||||
|
||||
changed = False
|
||||
action = module.params['action']
|
||||
volume_name = module.params['name']
|
||||
glusterbin = module.params['glusterbin']
|
||||
hosts = module.params['hosts']
|
||||
brick_path = module.params['brick']
|
||||
stripe = module.params['stripe']
|
||||
replica = module.params['replica']
|
||||
transport = module.params['transport']
|
||||
myhostname = module.params['host']
|
||||
start_volume = module.boolean(module.params['start'])
|
||||
rebalance = module.boolean(module.params['rebalance'])
|
||||
if not myhostname:
|
||||
myhostname = socket.gethostname()
|
||||
if not os.path.exists(glusterbin):
|
||||
module.fail_json(msg='could not find gluster commandline client at %s' % glusterbin)
|
||||
|
||||
set_option = module.params['option']
|
||||
set_parameter = module.params['parameter']
|
||||
value = module.params['value']
|
||||
directory = module.params['directory']
|
||||
|
||||
def run_gluster(gargs, **kwargs):
|
||||
args = [glusterbin]
|
||||
args.extend(gargs)
|
||||
rc, out, err = module.run_command(args, **kwargs)
|
||||
if rc != 0:
|
||||
module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err))
|
||||
return out
|
||||
|
||||
def run_gluster_nofail(gargs, **kwargs):
|
||||
args = [glusterbin]
|
||||
args.extend(gargs)
|
||||
rc, out, err = module.run_command(args, **kwargs)
|
||||
if rc != 0:
|
||||
return None
|
||||
return out
|
||||
|
||||
def run_gluster_yes(gargs):
|
||||
args = [glusterbin]
|
||||
args.extend(gargs)
|
||||
rc, out, err = module.run_command(args, data='y\n')
|
||||
if rc != 0:
|
||||
module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err))
|
||||
return out
|
||||
|
||||
def get_peers():
|
||||
out = run_gluster([ 'peer', 'status'])
|
||||
i = 0
|
||||
peers = {}
|
||||
hostname = None
|
||||
uuid = None
|
||||
state = None
|
||||
for row in out.split('\n'):
|
||||
if ': ' in row:
|
||||
key, value = row.split(': ')
|
||||
if key.lower() == 'hostname':
|
||||
hostname = value
|
||||
if key.lower() == 'uuid':
|
||||
uuid = value
|
||||
if key.lower() == 'state':
|
||||
state = value
|
||||
peers[hostname] = [ uuid, state ]
|
||||
return peers
|
||||
|
||||
def get_volumes():
|
||||
out = run_gluster([ 'volume', 'info' ])
|
||||
|
||||
volumes = {}
|
||||
volume = {}
|
||||
for row in out.split('\n'):
|
||||
if ': ' in row:
|
||||
key, value = row.split(': ')
|
||||
if key.lower() == 'volume name':
|
||||
volume['name'] = value
|
||||
volume['options'] = {}
|
||||
volume['quota'] = False
|
||||
if key.lower() == 'volume id':
|
||||
volume['id'] = value
|
||||
if key.lower() == 'status':
|
||||
volume['status'] = value
|
||||
if key.lower() == 'transport-type':
|
||||
volume['transport'] = value
|
||||
if key.lower() != 'bricks' and key.lower()[:5] == 'brick':
|
||||
if not 'bricks' in volume:
|
||||
volume['bricks'] = []
|
||||
volume['bricks'].append(value)
|
||||
# Volume options
|
||||
if '.' in key:
|
||||
if not 'options' in volume:
|
||||
volume['options'] = {}
|
||||
volume['options'][key] = value
|
||||
if key == 'features.quota' and value == 'on':
|
||||
volume['quota'] = True
|
||||
else:
|
||||
if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:':
|
||||
if len(volume) > 0:
|
||||
volumes[volume['name']] = volume
|
||||
volume = {}
|
||||
return volumes
|
||||
|
||||
def get_quotas(name, nofail):
|
||||
quotas = {}
|
||||
if nofail:
|
||||
out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ])
|
||||
if not out:
|
||||
return quotas
|
||||
else:
|
||||
out = run_gluster([ 'volume', 'quota', name, 'list' ])
|
||||
for row in out.split('\n'):
|
||||
if row[:1] == '/':
|
||||
q = re.split('\s+', row)
|
||||
quotas[q[0]] = q[1]
|
||||
return quotas
|
||||
|
||||
def wait_for_peer(host):
|
||||
for x in range(0, 4):
|
||||
peers = get_peers()
|
||||
if host in peers and peers[host][1].lower().find('peer in cluster') != -1:
|
||||
return True
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def probe(host):
|
||||
run_gluster([ 'peer', 'probe', host ])
|
||||
if not wait_for_peer(host):
|
||||
module.fail_json(msg='failed to probe peer %s' % host)
|
||||
changed = True
|
||||
|
||||
def probe_all_peers(hosts, peers):
|
||||
for host in hosts:
|
||||
if host not in peers:
|
||||
# dont probe ourselves
|
||||
if myhostname != host:
|
||||
probe(host)
|
||||
|
||||
def create_volume(name, stripe, replica, transport, hosts, brick):
|
||||
args = [ 'volume', 'create' ]
|
||||
args.append(name)
|
||||
if stripe:
|
||||
args.append('stripe')
|
||||
args.append(str(stripe))
|
||||
if replica:
|
||||
args.append('replica')
|
||||
args.append(str(replica))
|
||||
args.append('transport')
|
||||
args.append(transport)
|
||||
for host in hosts:
|
||||
args.append(('%s:%s' % (host, brick)))
|
||||
run_gluster(args)
|
||||
|
||||
def start_volume(name):
|
||||
run_gluster([ 'volume', 'start', name ])
|
||||
|
||||
def stop_volume(name):
|
||||
run_gluster_yes([ 'volume', 'stop', name ])
|
||||
|
||||
def set_volume_option(name, option, parameter):
|
||||
run_gluster([ 'volume', 'set', name, option, parameter ])
|
||||
|
||||
def add_brick(name, brick):
    run_gluster([ 'volume', 'add-brick', name, brick ])

def rebalance(name):
    run_gluster(['volume', 'rebalance', name, 'start'])

def enable_quota(name):
    run_gluster([ 'volume', 'quota', name, 'enable' ])

def set_quota(name, directory, value):
    run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ])

#
peers = get_peers()
volumes = get_volumes()
quotas = {}
if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
    quotas = get_quotas(volume_name, True)

if action == 'create':
    probe_all_peers(hosts, peers)
    if volume_name not in volumes:
        create_volume(volume_name, stripe, replica, transport, hosts, brick_path)
        changed = True
        volumes = get_volumes()

    if volume_name in volumes:
        if volumes[volume_name]['status'].lower() != 'started' and start_volume:
            start_volume(volume_name)
            changed = True

        # switch bricks
        new_bricks = []
        removed_bricks = []
        all_bricks = []
        for host in hosts:
            brick = '%s:%s' % (host, brick_path)
            all_bricks.append(brick)
            if brick not in volumes[volume_name]['bricks']:
                new_bricks.append(brick)

        # this module does not yet remove bricks, but we check those anyways
        for brick in volumes[volume_name]['bricks']:
            if brick not in all_bricks:
                removed_bricks.append(brick)

        for brick in new_bricks:
            add_brick(volume_name, brick)
            changed = True

        if len(new_bricks) > 0 and rebalance:
            rebalance(volume_name)

    else:
        module.fail_json(msg='failed to create volume %s' % volume_name)

if action == 'start':
    if volume_name not in volumes:
        module.fail_json(msg='volume not found %s' % volume_name)
    if volumes[volume_name]['status'].lower() != 'started':
        start_volume(volume_name)
        volumes = get_volumes()
        changed = True

if action == 'rebalance':
    if volume_name not in volumes:
        module.fail_json(msg='volume not found %s' % volume_name)
    rebalance(volume_name)
    changed = True

if action == 'stop':
    if volume_name not in volumes:
        module.fail_json(msg='volume not found %s' % volume_name)
    if volumes[volume_name]['status'].lower() != 'stopped':
        stop_volume(volume_name)
        volumes = get_volumes()
        changed = True

if action == 'tune':
    if volume_name not in volumes:
        module.fail_json(msg='volume not found %s' % volume_name)
    if set_option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][set_option] != set_parameter:
        set_volume_option(volume_name, set_option, set_parameter)
        volumes = get_volumes()
        changed = True

if action == 'limit-usage':
    if volume_name not in volumes:
        module.fail_json(msg='volume not found %s' % volume_name)
    if not volumes[volume_name]['quota']:
        enable_quota(volume_name)

    quotas = get_quotas(volume_name, False)
    if directory not in quotas:
        set_quota(volume_name, directory, value)
        changed = True
    elif quotas[directory] != value:
        set_quota(volume_name, directory, value)
        changed = True

facts = {}
facts['glusterfs'] = { 'peers': peers, 'volumes': volumes, 'quotas': quotas }

module.exit_json(changed=changed, ansible_facts=facts)

# import module snippets
from ansible.module_utils.basic import *
main()
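For orientation, the action branches above map onto playbook tasks. A minimal sketch, assuming the module exposes the values used in this code (action, a volume name, directory, value) as same-named task options; the authoritative option names are in the module's DOCUMENTATION block earlier in the file:

# Enable a 5GB quota on /web for volume gv0 (sketch; option names assumed from the code above)
- gluster_volume: action=limit-usage name=gv0 directory=/web value=5GB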
@@ -8,7 +8,7 @@ from subprocess import Popen, PIPE, call
DOCUMENTATION = '''
---
module: locale_gen
short_description: Creates of removes locales.
short_description: Creates or removes locales.
description:
    - Manages locales by editing /etc/locale.gen and invoking locale-gen.
version_added: "1.6"
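The corrected short_description covers both directions; a task sketch for each (locale value illustrative):

# Ensure the de_CH.UTF-8 locale exists (sketch)
- locale_gen: name=de_CH.UTF-8 state=present

# Remove it again (sketch)
- locale_gen: name=de_CH.UTF-8 state=absent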
@@ -131,7 +131,6 @@ def main():
    vgoptions = module.params['vg_options'].split()

    if module.params['pvs']:
        dev_string = ' '.join(module.params['pvs'])
        dev_list = module.params['pvs']
    elif state == 'present':
        module.fail_json(msg="No physical volumes given.")

@@ -188,7 +187,7 @@ def main():
            else:
                module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
        vgcreate_cmd = module.get_bin_path('vgcreate')
        rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg, dev_string])
        rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg] + dev_list)
        if rc == 0:
            changed = True
        else:
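The switch from dev_string to dev_list matters once several physical volumes are supplied, since each device is now passed to run_command as its own argument. A task sketch exercising that path (device paths illustrative):

# Create volume group vg.services on top of two physical volumes (sketch)
- lvg: vg=vg.services pvs=/dev/sdb1,/dev/sdc1 state=present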
@@ -52,7 +52,7 @@ options:
    description:
      - Select direction for a rule or default policy command.
    required: false
    choices: ['in', 'out', 'incoming', 'outgoing']
    choices: ['in', 'out', 'incoming', 'outgoing', 'routed']
  logging:
    description:
      - Toggles logging. Logged packets use the LOG_KERN syslog facility.

@@ -173,7 +173,7 @@ def main():
        state     = dict(default=None, choices=['enabled', 'disabled', 'reloaded', 'reset']),
        default   = dict(default=None, aliases=['policy'], choices=['allow', 'deny', 'reject']),
        logging   = dict(default=None, choices=['on', 'off', 'low', 'medium', 'high', 'full']),
        direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing']),
        direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
        delete    = dict(default=False, type='bool'),
        insert    = dict(default=None),
        rule      = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']),
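The new 'routed' choice targets forwarded traffic, mirroring `ufw default ... routed` on the command line. A hedged task sketch using the policy/direction options shown in the hunk above:

# Deny forwarded packets by default (sketch)
- ufw: policy=deny direction=routed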
@@ -113,7 +113,7 @@ def main():
            changed = True

    if state == 'present' and deployed:
        if module.md5(src) != module.md5(os.path.join(deploy_path, deployment)):
        if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
            os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment)))
            shutil.copyfile(src, os.path.join(deploy_path, deployment))
            deployed = False
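For context, the checksum comparison above runs when the artifact is already deployed; a task sketch using the option names visible in the hunk (paths illustrative, other options left at their defaults):

# Redeploy hello.war only if its contents changed (sketch)
- jboss: src=/tmp/hello.war deployment=hello.war state=present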
248 windows/win_chocolatey.ps1 Normal file

@@ -0,0 +1,248 @@
#!powershell
# This file is part of Ansible
#
# Copyright 2014, Trond Hindenes <trond@hindenes.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# WANT_JSON
# POWERSHELL_COMMON

function Write-Log
{
    param
    (
        [parameter(mandatory=$false)]
        [System.String]
        $message
    )

    $date = get-date -format 'yyyy-MM-dd hh:mm:ss.zz'

    Write-Host "$date | $message"

    Out-File -InputObject "$date $message" -FilePath $global:LoggingFile -Append
}

$params = Parse-Args $args;
$result = New-Object PSObject;
Set-Attr $result "changed" $false;

If ($params.package)
{
    $package = $params.package
}
Else
{
    Fail-Json $result "missing required argument: package"
}

if(($params.logPath).length -gt 0)
{
    $global:LoggingFile = $params.logPath
}
else
{
    $global:LoggingFile = "c:\ansible-playbook.log"
}
If ($params.force)
{
    $force = $params.force | ConvertTo-Bool
}
Else
{
    $force = $false
}

If ($params.version)
{
    $version = $params.version
}
Else
{
    $version = $null
}

If ($params.showlog)
{
    $showlog = $params.showlog | ConvertTo-Bool
}
Else
{
    $showlog = $null
}

If ($params.state)
{
    $state = $params.state.ToString().ToLower()
    If (($state -ne "present") -and ($state -ne "absent"))
    {
        Fail-Json $result "state is $state; must be present or absent"
    }
}
Else
{
    $state = "present"
}

$ChocoAlreadyInstalled = get-command choco -ErrorAction 0
if ($ChocoAlreadyInstalled -eq $null)
{
    #We need to install chocolatey
    $install_choco_result = iex ((new-object net.webclient).DownloadString("https://chocolatey.org/install.ps1"))
    $result.changed = $true
    $executable = "C:\ProgramData\chocolatey\bin\choco.exe"
}
Else
{
    $executable = "choco.exe"
}

If ($params.source)
{
    $source = $params.source.ToString().ToLower()
    If (($source -ne "chocolatey") -and ($source -ne "webpi") -and ($source -ne "windowsfeatures") -and ($source -ne "ruby"))
    {
        Fail-Json $result "source is $source - must be one of chocolatey, ruby, webpi or windowsfeatures."
    }
}
Elseif (!$params.source)
{
    $source = "chocolatey"
}

if ($source -eq "webpi")
{
    # check whether 'webpi' installation source is available; if it isn't, install it
    $webpi_check_cmd = "$executable list webpicmd -localonly"
    $webpi_check_result = invoke-expression $webpi_check_cmd
    Set-Attr $result "chocolatey_bootstrap_webpi_check_cmd" $webpi_check_cmd
    Set-Attr $result "chocolatey_bootstrap_webpi_check_log" $webpi_check_result
    if (
        (
            ($webpi_check_result.GetType().Name -eq "String") -and
            ($webpi_check_result -match "No packages found")
        ) -or
        ($webpi_check_result -contains "No packages found.")
    )
    {
        #lessmsi is a webpicmd dependency, but dependency resolution fails unless it's installed separately
        $lessmsi_install_cmd = "$executable install lessmsi"
        $lessmsi_install_result = invoke-expression $lessmsi_install_cmd
        Set-Attr $result "chocolatey_bootstrap_lessmsi_install_cmd" $lessmsi_install_cmd
        Set-Attr $result "chocolatey_bootstrap_lessmsi_install_log" $lessmsi_install_result

        $webpi_install_cmd = "$executable install webpicmd"
        $webpi_install_result = invoke-expression $webpi_install_cmd
        Set-Attr $result "chocolatey_bootstrap_webpi_install_cmd" $webpi_install_cmd
        Set-Attr $result "chocolatey_bootstrap_webpi_install_log" $webpi_install_result

        if (($webpi_install_result | select-string "already installed").length -gt 0)
        {
            #no change
        }
        elseif (($webpi_install_result | select-string "webpicmd has finished successfully").length -gt 0)
        {
            $result.changed = $true
        }
        Else
        {
            Fail-Json $result "WebPI install error: $webpi_install_result"
        }
    }
}
$expression = $executable
if ($state -eq "present")
{
    $expression += " install $package"
}
Elseif ($state -eq "absent")
{
    $expression += " uninstall $package"
}
if ($force)
{
    if ($state -eq "present")
    {
        $expression += " -force"
    }
}
if ($version)
{
    $expression += " -version $version"
}
if ($source -eq "chocolatey")
{
    $expression += " -source https://chocolatey.org/api/v2/"
}
elseif (($source -eq "windowsfeatures") -or ($source -eq "webpi") -or ($source -eq "ruby"))
{
    $expression += " -source $source"
}

Set-Attr $result "chocolatey command" $expression
$op_result = invoke-expression $expression
if ($state -eq "present")
{
    if (
        (($op_result | select-string "already installed").length -gt 0) -or
        # webpi has different text output, and that doesn't include the package name but instead the human-friendly name
        (($op_result | select-string "No products to be installed").length -gt 0)
    )
    {
        #no change
    }
    elseif (
        (($op_result | select-string "has finished successfully").length -gt 0) -or
        # webpi has different text output, and that doesn't include the package name but instead the human-friendly name
        (($op_result | select-string "Install of Products: SUCCESS").length -gt 0) -or
        (($op_result | select-string "gem installed").length -gt 0) -or
        (($op_result | select-string "gems installed").length -gt 0)
    )
    {
        $result.changed = $true
    }
    Else
    {
        Fail-Json $result "Install error: $op_result"
    }
}
Elseif ($state -eq "absent")
{
    $op_result = invoke-expression "$executable uninstall $package"
    # HACK: Misleading - 'Uninstalling from folder' appears in output even when package is not installed, hence order of checks this way
    if (
        (($op_result | select-string "not installed").length -gt 0) -or
        (($op_result | select-string "Cannot find path").length -gt 0)
    )
    {
        #no change
    }
    elseif (($op_result | select-string "Uninstalling from folder").length -gt 0)
    {
        $result.changed = $true
    }
    else
    {
        Fail-Json $result "Uninstall error: $op_result"
    }
}

if ($showlog)
{
    Set-Attr $result "chocolatey_log" $op_result
}
Set-Attr $result "chocolatey_success" "true"

Exit-Json $result;
118 windows/win_chocolatey.py Normal file

@@ -0,0 +1,118 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2014, Trond Hindenes <trond@hindenes.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name

DOCUMENTATION = '''
---
module: win_chocolatey
version_added: "1.8"
short_description: Installs packages using chocolatey
description:
    - Installs packages using Chocolatey (http://chocolatey.org/). If Chocolatey is missing from the system, the module will install it. List of packages: http://chocolatey.org/packages.
options:
  package:
    description:
      - Name of the package to be installed
    required: true
    default: null
    aliases: []
  state:
    description:
      - State of the package on the system
    required: false
    choices:
      - present
      - absent
    default: present
    aliases: []
  force:
    description:
      - Forces install of the package (even if it already exists). Using Force will cause ansible to always report "changed: true" on its run
    required: false
    choices:
      - yes
      - no
    default: no
    aliases: []
  version:
    description:
      - Specific version of the package to be installed
      - Ignored when state == 'absent'
    required: false
    default: null
    aliases: []
  showlog:
    description:
      - Outputs the chocolatey log inside a chocolatey_log property.
    required: false
    choices:
      - yes
      - no
    default: no
    aliases: []
  source:
    description:
      - Which source to install from
    required: false
    choices:
      - chocolatey
      - ruby
      - webpi
      - windowsfeatures
    default: chocolatey
    aliases: []
  logPath:
    description:
      - Where to log command output to
    required: false
    default: c:\\ansible-playbook.log
    aliases: []
author: Trond Hindenes, Peter Mounce
'''

# TODO:
# * Better parsing when a package has dependencies - currently fails
# * Time each item that is run
# * Support 'changed' with gems - would require shelling out to `gem list` first and parsing, kinda defeating the point of using chocolatey.

EXAMPLES = '''
# Install git
win_chocolatey:
  package: git

# Install notepadplusplus version 6.6
win_chocolatey:
  package: notepadplusplus.install
  version: 6.6

# Uninstall git
win_chocolatey:
  package: git
  state: absent

# Install Application Request Routing v3 from webpi
# Logically, this requires that you install IIS first (see win_feature)
# To find a list of packages available via webpi source, `choco list -source webpi`
win_chocolatey:
  package: ARRv3
  source: webpi
'''
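The force, showlog and logPath options documented above are not exercised by the examples; a hedged sketch combining them (the log path value is illustrative, not a module default):

# Force a reinstall of git and return the chocolatey log with the result (sketch)
win_chocolatey:
  package: git
  force: yes
  showlog: yes
  logPath: c:\chocolatey-ansible.log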
86 windows/win_updates.ps1 Normal file

@@ -0,0 +1,86 @@
#!powershell
# This file is part of Ansible
#
# Copyright 2014, Trond Hindenes <trond@hindenes.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# WANT_JSON
# POWERSHELL_COMMON

function Write-Log
{
    param
    (
        [parameter(mandatory=$false)]
        [System.String]
        $message
    )

    $date = get-date -format 'yyyy-MM-dd hh:mm:ss.zz'

    Write-Host "$date $message"

    Out-File -InputObject "$date $message" -FilePath $global:LoggingFile -Append
}

$params = Parse-Args $args;
$result = New-Object PSObject;
Set-Attr $result "changed" $false;

if(($params.logPath).Length -gt 0) {
    $global:LoggingFile = $params.logPath
} else {
    $global:LoggingFile = "c:\ansible-playbook.log"
}
if ($params.category) {
    $category = $params.category
} else {
    $category = "critical"
}

$installed_prior = get-wulist -isinstalled | foreach { $_.KBArticleIDs }
set-attr $result "updates_already_present" $installed_prior

write-log "Looking for updates in '$category'"
set-attr $result "updates_category" $category
$to_install = get-wulist -category $category
$installed = @()
foreach ($u in $to_install) {
    $kb = $u.KBArticleIDs
    write-log "Installing $kb - $($u.Title)"
    $install_result = get-wuinstall -KBArticleID $u.KBArticleIDs -acceptall -ignorereboot
    Set-Attr $result "updates_installed_KB$kb" $u.Title
    $installed += $kb
}
write-log "Installed: $($installed.count)"
set-attr $result "updates_installed" $installed
set-attr $result "updates_installed_count" $installed.count
$result.changed = $installed.count -gt 0

$installed_afterwards = get-wulist -isinstalled | foreach { $_.KBArticleIDs }
set-attr $result "updates_installed_afterwards" $installed_afterwards

$reboot_needed = Get-WURebootStatus
write-log $reboot_needed
if ($reboot_needed -match "not") {
    write-log "Reboot not required"
} else {
    write-log "Reboot required"
    Set-Attr $result "updates_reboot_needed" $true
    $result.changed = $true
}

Set-Attr $result "updates_success" "true"
Exit-Json $result;
51 windows/win_updates.py Normal file

@@ -0,0 +1,51 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2014, Peter Mounce <public@neverrunwithscissors.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name

DOCUMENTATION = '''
---
module: win_updates
version_added: "1.8"
short_description: Lists / Installs windows updates
description:
    - Installs windows updates using PSWindowsUpdate (http://gallery.technet.microsoft.com/scriptcenter/2d191bcd-3308-4edd-9de2-88dff796b0bc).
    - PSWindowsUpdate needs to be installed first - use win_chocolatey.
options:
  category:
    description:
      - Which category to install updates from
    required: false
    default: critical
    choices:
      - critical
      - security
      - (anything that is a valid update category)
    aliases: []
author: Peter Mounce
'''

EXAMPLES = '''
# Install updates from security category
win_updates:
  category: security
'''
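Because the description notes that PSWindowsUpdate must already be installed, a two-task sketch tying the two new windows modules together (the package name is the one published on chocolatey.org):

# Install the PSWindowsUpdate prerequisite, then apply security updates (sketch)
win_chocolatey:
  package: PSWindowsUpdate

win_updates:
  category: security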