Merge remote-tracking branch 'upstream/devel' into devel
commit 5462748553
26 changed files with 3377 additions and 62 deletions

@@ -159,9 +159,10 @@ def start_slave(cursor):
     return started
 
-def changemaster(cursor, chm):
-    SQLPARAM = ",".join(chm)
-    cursor.execute("CHANGE MASTER TO " + SQLPARAM)
+def changemaster(cursor, chm, chm_params):
+    sql_param = ",".join(chm)
+    query = 'CHANGE MASTER TO %s' % sql_param
+    cursor.execute(query, chm_params)
 
 def strip_quotes(s):
@@ -318,37 +319,52 @@ def main():
     elif mode in "changemaster":
         print "Change master"
         chm=[]
+        chm_params = {}
         if master_host:
-            chm.append("MASTER_HOST='" + master_host + "'")
+            chm.append("MASTER_HOST=%(master_host)s")
+            chm_params['master_host'] = master_host
         if master_user:
-            chm.append("MASTER_USER='" + master_user + "'")
+            chm.append("MASTER_USER=%(master_user)s")
+            chm_params['master_user'] = master_user
         if master_password:
-            chm.append("MASTER_PASSWORD='" + master_password + "'")
+            chm.append("MASTER_PASSWORD=%(master_password)s")
+            chm_params['master_password'] = master_password
         if master_port:
-            chm.append("MASTER_PORT=" + master_port)
+            chm.append("MASTER_PORT=%(master_port)s")
+            chm_params['master_port'] = master_port
         if master_connect_retry:
-            chm.append("MASTER_CONNECT_RETRY='" + master_connect_retry + "'")
+            chm.append("MASTER_CONNECT_RETRY=%(master_connect_retry)s")
+            chm_params['master_connect_retry'] = master_connect_retry
         if master_log_file:
-            chm.append("MASTER_LOG_FILE='" + master_log_file + "'")
+            chm.append("MASTER_LOG_FILE=%(master_log_file)s")
+            chm_params['master_log_file'] = master_log_file
         if master_log_pos:
-            chm.append("MASTER_LOG_POS=" + master_log_pos)
+            chm.append("MASTER_LOG_POS=%(master_log_pos)s")
+            chm_params['master_log_pos'] = master_log_pos
         if relay_log_file:
-            chm.append("RELAY_LOG_FILE='" + relay_log_file + "'")
+            chm.append("RELAY_LOG_FILE=%(relay_log_file)s")
+            chm_params['relay_log_file'] = relay_log_file
         if relay_log_pos:
-            chm.append("RELAY_LOG_POS=" + relay_log_pos)
+            chm.append("RELAY_LOG_POS=%(relay_log_pos)s")
+            chm_params['relay_log_pos'] = relay_log_pos
         if master_ssl:
             chm.append("MASTER_SSL=1")
         if master_ssl_ca:
-            chm.append("MASTER_SSL_CA='" + master_ssl_ca + "'")
+            chm.append("MASTER_SSL_CA=%(master_ssl_ca)s")
+            chm_params['master_ssl_ca'] = master_ssl_ca
         if master_ssl_capath:
-            chm.append("MASTER_SSL_CAPATH='" + master_ssl_capath + "'")
+            chm.append("MASTER_SSL_CAPATH=%(master_ssl_capath)s")
+            chm_params['master_ssl_capath'] = master_ssl_capath
         if master_ssl_cert:
-            chm.append("MASTER_SSL_CERT='" + master_ssl_cert + "'")
+            chm.append("MASTER_SSL_CERT=%(master_ssl_cert)s")
+            chm_params['master_ssl_cert'] = master_ssl_cert
         if master_ssl_key:
-            chm.append("MASTER_SSL_KEY='" + master_ssl_key + "'")
+            chm.append("MASTER_SSL_KEY=%(master_ssl_key)s")
+            chm_params['master_ssl_key'] = master_ssl_key
         if master_ssl_cipher:
-            chm.append("MASTER_SSL_CIPHER='" + master_ssl_cipher + "'")
-        changemaster(cursor,chm)
+            chm.append("MASTER_SSL_CIPHER=%(master_ssl_cipher)s")
+            chm_params['master_ssl_cipher'] = master_ssl_cipher
+        changemaster(cursor, chm, chm_params)
         module.exit_json(changed=True)
     elif mode in "startslave":
         started = start_slave(cursor)
@@ -366,4 +382,4 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
warnings.simplefilter("ignore")
warnings.simplefilter("ignore")
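The refactor above replaces string concatenation with a parameterized CHANGE MASTER TO statement: the SQL keeps named pyformat placeholders and the user-supplied values travel separately in chm_params. A minimal standalone sketch of the same pattern, assuming MySQLdb and an already-open connection named conn (host and port values are made up), is:

# Sketch only - mirrors the pattern used by changemaster() above, not part of the diff.
chm = ["MASTER_HOST=%(master_host)s", "MASTER_PORT=%(master_port)s"]
chm_params = {'master_host': 'replica-source.example.com', 'master_port': 3306}
query = 'CHANGE MASTER TO %s' % ",".join(chm)
cursor = conn.cursor()
# MySQLdb substitutes and escapes the values itself, so they are never
# spliced into the SQL string by hand.
cursor.execute(query, chm_params)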
@@ -131,7 +131,6 @@ EXAMPLES='''
    desc=deployment
'''

import json
import datetime
import base64
168
monitoring/uptimerobot.py
Normal file

@@ -0,0 +1,168 @@
#!/usr/bin/python
|
||||
|
||||
DOCUMENTATION = '''
|
||||
|
||||
module: uptimerobot
|
||||
short_description: Pause and start Uptime Robot monitoring
|
||||
description:
|
||||
- This module will let you start and pause Uptime Robot monitoring.
|
||||
author: Nate Kingsley
|
||||
version_added: "1.9"
|
||||
requirements:
|
||||
- Valid Uptime Robot API Key
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Define whether the monitor should be running or paused.
|
||||
required: true
|
||||
default: null
|
||||
choices: [ "started", "paused" ]
|
||||
aliases: []
|
||||
monitorid:
|
||||
description:
|
||||
- ID of the monitor to check.
|
||||
required: true
|
||||
default: null
|
||||
choices: []
|
||||
aliases: []
|
||||
apikey:
|
||||
description:
|
||||
- Uptime Robot API key.
|
||||
required: true
|
||||
default: null
|
||||
choices: []
|
||||
aliases: []
|
||||
notes:
|
||||
- Support for adding and removing monitors and alert contacts has not yet been implemented.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Pause the monitor with an ID of 12345.
|
||||
- uptimerobot: monitorid=12345
|
||||
apikey=12345-1234512345
|
||||
state=paused
|
||||
|
||||
# Start the monitor with an ID of 12345.
|
||||
- uptimerobot: monitorid=12345
|
||||
apikey=12345-1234512345
|
||||
state=started
|
||||
|
||||
'''
|
||||
|
||||
import json
|
||||
import urllib
|
||||
import urllib2
|
||||
import time
|
||||
|
||||
API_BASE = "http://api.uptimerobot.com/"
|
||||
|
||||
API_ACTIONS = dict(
|
||||
status='getMonitors?',
|
||||
editMonitor='editMonitor?'
|
||||
)
|
||||
|
||||
API_FORMAT = 'json'
|
||||
|
||||
API_NOJSONCALLBACK = 1
|
||||
|
||||
CHANGED_STATE = False
|
||||
|
||||
SUPPORTS_CHECK_MODE = False
|
||||
|
||||
def checkID(params):
|
||||
|
||||
data = urllib.urlencode(params)
|
||||
|
||||
full_uri = API_BASE + API_ACTIONS['status'] + data
|
||||
|
||||
req = urllib2.urlopen(full_uri)
|
||||
|
||||
result = req.read()
|
||||
|
||||
jsonresult = json.loads(result)
|
||||
|
||||
req.close()
|
||||
|
||||
return jsonresult
|
||||
|
||||
|
||||
def startMonitor(params):
|
||||
|
||||
params['monitorStatus'] = 1
|
||||
|
||||
data = urllib.urlencode(params)
|
||||
|
||||
full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
|
||||
|
||||
req = urllib2.urlopen(full_uri)
|
||||
|
||||
result = req.read()
|
||||
|
||||
jsonresult = json.loads(result)
|
||||
|
||||
req.close()
|
||||
|
||||
return jsonresult['stat']
|
||||
|
||||
|
||||
def pauseMonitor(params):
|
||||
|
||||
params['monitorStatus'] = 0
|
||||
|
||||
data = urllib.urlencode(params)
|
||||
|
||||
full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
|
||||
|
||||
req = urllib2.urlopen(full_uri)
|
||||
|
||||
result = req.read()
|
||||
|
||||
jsonresult = json.loads(result)
|
||||
|
||||
req.close()
|
||||
|
||||
return jsonresult['stat']
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
state = dict(required=True, choices=['started', 'paused']),
|
||||
apikey = dict(required=True),
|
||||
monitorid = dict(required=True)
|
||||
),
|
||||
supports_check_mode=SUPPORTS_CHECK_MODE
|
||||
)
|
||||
|
||||
params = dict(
|
||||
apiKey=module.params['apikey'],
|
||||
monitors=module.params['monitorid'],
|
||||
monitorID=module.params['monitorid'],
|
||||
format=API_FORMAT,
|
||||
noJsonCallback=API_NOJSONCALLBACK
|
||||
)
|
||||
|
||||
check_result = checkID(params)
|
||||
|
||||
if check_result['stat'] != "ok":
|
||||
module.fail_json(
|
||||
msg="failed",
|
||||
result=check_result['message']
|
||||
)
|
||||
|
||||
if module.params['state'] == 'started':
|
||||
monitor_result = startMonitor(params)
|
||||
else:
|
||||
monitor_result = pauseMonitor(params)
|
||||
|
||||
|
||||
|
||||
module.exit_json(
|
||||
msg="success",
|
||||
result=monitor_result
|
||||
)
|
||||
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
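The three helpers above (checkID, startMonitor, pauseMonitor) all drive the Uptime Robot API the same way: urlencode the parameters onto an action endpoint, GET it, and read the JSON 'stat' field. A standalone sketch of the request startMonitor() ends up sending (API key and monitor ID are placeholders) is:

import json
import urllib
import urllib2

params = {'apiKey': 'u12345-example', 'monitorID': '12345', 'monitorStatus': 1,
          'format': 'json', 'noJsonCallback': 1}
uri = "http://api.uptimerobot.com/editMonitor?" + urllib.urlencode(params)
req = urllib2.urlopen(uri)
print json.loads(req.read())['stat']   # "ok" on success
req.close()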
212
monitoring/zabbix_group.py
Normal file

@@ -0,0 +1,212 @@
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2014, René Moser <mail@renemoser.net>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: zabbix_group
|
||||
short_description: Add or remove a host group in Zabbix.
|
||||
description:
|
||||
- This module uses the Zabbix API to add and remove host groups.
|
||||
version_added: '1.8'
|
||||
requirements: [ 'zabbix-api' ]
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Whether the host group should be added or removed.
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
host_group:
|
||||
description:
|
||||
- Name of the host group to be added or removed.
|
||||
required: true
|
||||
default: null
|
||||
aliases: [ ]
|
||||
server_url:
|
||||
description:
|
||||
- URL of the Zabbix server, including protocol (http or https), e.g.
https://monitoring.example.com/zabbix. C(url) is an alias
for C(server_url). If not set, the environment variable
C(ZABBIX_SERVER_URL) is used.
|
||||
required: true
|
||||
default: null
|
||||
aliases: [ 'url' ]
|
||||
login_user:
|
||||
description:
|
||||
- Zabbix user name. If not set environment variable
|
||||
C(ZABBIX_LOGIN_USER) is used.
|
||||
required: true
|
||||
default: null
|
||||
login_password:
|
||||
description:
|
||||
- Zabbix user password. If not set environment variable
|
||||
C(ZABBIX_LOGIN_PASSWORD) is used.
|
||||
required: true
|
||||
notes:
|
||||
- The module has been tested with Zabbix Server 2.2.
|
||||
author: René Moser
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
# Add a new host group to Zabbix
|
||||
- zabbix_group: host_group='Linux servers'
|
||||
server_url=https://monitoring.example.com/zabbix
|
||||
login_user=ansible
|
||||
login_password=secure
|
||||
|
||||
# Add a new host group, login data is provided by environment variables:
|
||||
# ZABBIX_LOGIN_USER, ZABBIX_LOGIN_PASSWORD, ZABBIX_SERVER_URL:
|
||||
- zabbix_group: host_group=Webservers
|
||||
|
||||
# Remove a host group from Zabbix
|
||||
- zabbix_group: host_group='Linux servers'
|
||||
state=absent
|
||||
server_url=https://monitoring.example.com/zabbix
|
||||
login_user=ansible
|
||||
login_password=secure
|
||||
'''
|
||||
|
||||
try:
|
||||
from zabbix_api import ZabbixAPI
|
||||
HAS_ZABBIX_API = True
|
||||
except ImportError:
|
||||
HAS_ZABBIX_API = False
|
||||
|
||||
|
||||
def create_group(zbx, host_group):
|
||||
try:
|
||||
result = zbx.hostgroup.create(
|
||||
{
|
||||
'name': host_group
|
||||
}
|
||||
)
|
||||
except BaseException as e:
|
||||
return 1, None, str(e)
|
||||
return 0, result['groupids'], None
|
||||
|
||||
|
||||
def get_group(zbx, host_group):
|
||||
try:
|
||||
result = zbx.hostgroup.get(
|
||||
{
|
||||
'filter':
|
||||
{
|
||||
'name': host_group,
|
||||
}
|
||||
}
|
||||
)
|
||||
except BaseException as e:
|
||||
return 1, None, str(e)
|
||||
|
||||
return 0, result[0]['groupid'], None
|
||||
|
||||
|
||||
def delete_group(zbx, group_id):
|
||||
try:
|
||||
zbx.hostgroup.delete([ group_id ])
|
||||
except BaseException as e:
|
||||
return 1, None, str(e)
|
||||
return 0, None, None
|
||||
|
||||
|
||||
def check_group(zbx, host_group):
|
||||
try:
|
||||
result = zbx.hostgroup.exists(
|
||||
{
|
||||
'name': host_group
|
||||
}
|
||||
)
|
||||
except BaseException as e:
|
||||
return 1, None, str(e)
|
||||
return 0, result, None
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
host_group=dict(required=True, default=None),
|
||||
server_url=dict(default=None, aliases=['url']),
|
||||
login_user=dict(default=None),
|
||||
login_password=dict(default=None),
|
||||
),
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
if not HAS_ZABBIX_API:
|
||||
module.fail_json(msg='Missing required zabbix-api module (check docs or install with: pip install zabbix-api)')
|
||||
|
||||
try:
|
||||
login_user = module.params['login_user'] or os.environ['ZABBIX_LOGIN_USER']
|
||||
login_password = module.params['login_password'] or os.environ['ZABBIX_LOGIN_PASSWORD']
|
||||
server_url = module.params['server_url'] or os.environ['ZABBIX_SERVER_URL']
|
||||
except KeyError, e:
|
||||
module.fail_json(msg='Missing login data: %s is not set.' % e.message)
|
||||
|
||||
host_group = module.params['host_group']
|
||||
state = module.params['state']
|
||||
|
||||
try:
|
||||
zbx = ZabbixAPI(server_url)
|
||||
zbx.login(login_user, login_password)
|
||||
except BaseException as e:
|
||||
module.fail_json(msg='Failed to connect to Zabbix server: %s' % e)
|
||||
|
||||
changed = False
|
||||
msg = ''
|
||||
|
||||
if state == 'present':
|
||||
(rc, exists, error) = check_group(zbx, host_group)
|
||||
if rc != 0:
|
||||
module.fail_json(msg='Failed to check host group %s existence: %s' % (host_group, error))
|
||||
if not exists:
|
||||
if module.check_mode:
|
||||
changed = True
|
||||
else:
|
||||
(rc, group, error) = create_group(zbx, host_group)
|
||||
if rc == 0:
|
||||
changed = True
|
||||
else:
|
||||
module.fail_json(msg='Failed to create host group: %s' % error)
|
||||
|
||||
if state == 'absent':
|
||||
(rc, exists, error) = check_group(zbx, host_group)
|
||||
if rc != 0:
|
||||
module.fail_json(msg='Failed to check host group %s existence: %s' % (host_group, error))
|
||||
if exists:
|
||||
if module.check_mode:
|
||||
changed = True
|
||||
else:
|
||||
(rc, group_id, error) = get_group(zbx, host_group)
|
||||
if rc != 0:
|
||||
module.fail_json(msg='Failed to get host group: %s' % error)
|
||||
|
||||
(rc, _, error) = delete_group(zbx, group_id)
|
||||
if rc == 0:
|
||||
changed = True
|
||||
else:
|
||||
module.fail_json(msg='Failed to remove host group: %s' % error)
|
||||
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
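zabbix_group wraps four zabbix-api calls: hostgroup.exists to check, hostgroup.get to resolve the group id, and hostgroup.create / hostgroup.delete to change state. A minimal interactive sketch of the same calls, assuming the zabbix-api package and placeholder server URL and credentials, is:

from zabbix_api import ZabbixAPI

zbx = ZabbixAPI('https://monitoring.example.com/zabbix')
zbx.login('ansible', 'secure')
if not zbx.hostgroup.exists({'name': 'Linux servers'}):
    print zbx.hostgroup.create({'name': 'Linux servers'})['groupids']
else:
    group_id = zbx.hostgroup.get({'filter': {'name': 'Linux servers'}})[0]['groupid']
    print 'already present, groupid %s' % group_id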
252
network/haproxy.py
Normal file

@@ -0,0 +1,252 @@
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2014, Ravi Bhure <ravibhure@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: haproxy
|
||||
version_added: "1.9"
|
||||
short_description: Enable, disable, and set the weight of HAProxy backend servers, using socket commands.
|
||||
description:
|
||||
- The enabled state marks an HAProxy backend server as UP; it reports the
current weight of the server by default and can set a new weight when
one is provided.
- The disabled state marks a backend server as DOWN for maintenance; it
also reports the current weight and can optionally shut down the sessions
attached to the server.
|
||||
notes:
|
||||
- "enable or disable commands are restricted and can only be issued on sockets configured for level 'admin', "
|
||||
- "Check - http://haproxy.1wt.eu/download/1.5/doc/configuration.txt, "
|
||||
- "Example: 'stats socket /var/run/haproxy.sock level admin'"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- describe the desired state of the given host in lb pool.
|
||||
required: true
|
||||
default: null
|
||||
choices: [ "enabled", "disabled" ]
|
||||
host:
|
||||
description:
|
||||
- Host (backend) to operate in Haproxy.
|
||||
required: true
|
||||
default: null
|
||||
socket:
|
||||
description:
|
||||
- Haproxy socket file name with path.
|
||||
required: false
|
||||
default: /var/run/haproxy.sock
|
||||
backend:
|
||||
description:
|
||||
- Name of the haproxy backend pool.
|
||||
Required, else auto-detection applied.
|
||||
required: false
|
||||
default: auto-detected
|
||||
weight:
|
||||
description:
|
||||
- The value passed in argument. If the value ends with the '%' sign, then the new weight will be relative to the initially configured weight. Relative weights are only permitted between 0 and 100% and absolute weights are permitted between 0 and 256.
|
||||
required: false
|
||||
default: null
|
||||
shutdown_sessions:
|
||||
description:
|
||||
- When disabling server, immediately terminate all the sessions attached to the specified server. This can be used to terminate long-running sessions after a server is put into maintenance mode, for instance.
|
||||
required: false
|
||||
default: false
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
examples:
|
||||
|
||||
# disable server in 'www' backend pool
|
||||
- haproxy: state=disabled host={{ inventory_hostname }} backend=www
|
||||
|
||||
# disable server without backend pool name (apply to all available backend pool)
|
||||
- haproxy: state=disabled host={{ inventory_hostname }}
|
||||
|
||||
# disable server, provide socket file
|
||||
- haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www
|
||||
|
||||
# disable backend server in 'www' backend pool and drop open sessions to it
|
||||
- haproxy: state=disabled host={{ inventory_hostname }} backend=www socket=/var/run/haproxy.sock shutdown_sessions=true
|
||||
|
||||
# enable server in 'www' backend pool
|
||||
- haproxy: state=enabled host={{ inventory_hostname }} backend=www
|
||||
|
||||
# enable server in 'www' backend pool with change server(s) weight
|
||||
- haproxy: state=enabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock weight=10 backend=www
|
||||
|
||||
author: Ravi Bhure <ravibhure@gmail.com>
|
||||
'''
|
||||
|
||||
import socket
|
||||
|
||||
|
||||
DEFAULT_SOCKET_LOCATION="/var/run/haproxy.sock"
|
||||
RECV_SIZE = 1024
|
||||
ACTION_CHOICES = ['enabled', 'disabled']
|
||||
|
||||
######################################################################
|
||||
class TimeoutException(Exception):
|
||||
pass
|
||||
|
||||
class HAProxy(object):
|
||||
"""
|
||||
Used for communicating with HAProxy through its local UNIX socket interface.
|
||||
Perform common tasks in Haproxy related to enable server and
|
||||
disable server.
|
||||
|
||||
The complete set of external commands Haproxy handles is documented
|
||||
on their website:
|
||||
|
||||
http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands
|
||||
"""
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
|
||||
self.state = self.module.params['state']
|
||||
self.host = self.module.params['host']
|
||||
self.backend = self.module.params['backend']
|
||||
self.weight = self.module.params['weight']
|
||||
self.socket = self.module.params['socket']
|
||||
self.shutdown_sessions = self.module.params['shutdown_sessions']
|
||||
|
||||
self.command_results = []
|
||||
|
||||
def execute(self, cmd, timeout=200):
|
||||
"""
|
||||
Executes a HAProxy command by sending a message to a HAProxy's local
|
||||
UNIX socket and waiting up to 'timeout' milliseconds for the response.
|
||||
"""
|
||||
|
||||
self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||
self.client.connect(self.socket)
|
||||
self.client.sendall('%s\n' % cmd)
|
||||
result = ''
|
||||
buf = ''
|
||||
buf = self.client.recv(RECV_SIZE)
|
||||
while buf:
|
||||
result += buf
|
||||
buf = self.client.recv(RECV_SIZE)
|
||||
self.command_results = result.strip()
|
||||
self.client.close()
|
||||
return result
|
||||
|
||||
def enabled(self, host, backend, weight):
|
||||
"""
|
||||
Enabled action: marks the server UP so that health checks are re-enabled,
reports the current weight of the server by default, and
sets the weight of the backend server when one is provided.
|
||||
"""
|
||||
svname = host
|
||||
if self.backend is None:
|
||||
output = self.execute('show stat')
|
||||
#sanitize and make a list of lines
|
||||
output = output.lstrip('# ').strip()
|
||||
output = output.split('\n')
|
||||
result = output
|
||||
|
||||
for line in result:
|
||||
if 'BACKEND' in line:
|
||||
result = line.split(',')[0]
|
||||
pxname = result
|
||||
cmd = "get weight %s/%s ; enable server %s/%s" % (pxname, svname, pxname, svname)
|
||||
if weight:
|
||||
cmd += "; set weight %s/%s %s" % (pxname, svname, weight)
|
||||
self.execute(cmd)
|
||||
|
||||
else:
|
||||
pxname = backend
|
||||
cmd = "get weight %s/%s ; enable server %s/%s" % (pxname, svname, pxname, svname)
|
||||
if weight:
|
||||
cmd += "; set weight %s/%s %s" % (pxname, svname, weight)
|
||||
self.execute(cmd)
|
||||
|
||||
def disabled(self, host, backend, shutdown_sessions):
|
||||
"""
|
||||
Disabled action: marks the server DOWN for maintenance. In this mode no further checks
are performed on the server until it leaves maintenance;
optionally the sessions attached to the server are shut down as well.
|
||||
"""
|
||||
svname = host
|
||||
if self.backend is None:
|
||||
output = self.execute('show stat')
|
||||
#sanitize and make a list of lines
|
||||
output = output.lstrip('# ').strip()
|
||||
output = output.split('\n')
|
||||
result = output
|
||||
|
||||
for line in result:
|
||||
if 'BACKEND' in line:
|
||||
result = line.split(',')[0]
|
||||
pxname = result
|
||||
cmd = "get weight %s/%s ; disable server %s/%s" % (pxname, svname, pxname, svname)
|
||||
if shutdown_sessions:
|
||||
cmd += "; shutdown sessions server %s/%s" % (pxname, svname)
|
||||
self.execute(cmd)
|
||||
|
||||
else:
|
||||
pxname = backend
|
||||
cmd = "get weight %s/%s ; disable server %s/%s" % (pxname, svname, pxname, svname)
|
||||
if shutdown_sessions:
|
||||
cmd += "; shutdown sessions server %s/%s" % (pxname, svname)
|
||||
self.execute(cmd)
|
||||
|
||||
def act(self):
|
||||
"""
|
||||
Figure out what you want to do from ansible, and then do it.
|
||||
"""
|
||||
|
||||
# toggle enable/disable server
|
||||
if self.state == 'enabled':
|
||||
self.enabled(self.host, self.backend, self.weight)
|
||||
|
||||
elif self.state == 'disabled':
|
||||
self.disabled(self.host, self.backend, self.shutdown_sessions)
|
||||
|
||||
else:
|
||||
self.module.fail_json(msg="unknown state specified: '%s'" % self.state)
|
||||
|
||||
self.module.exit_json(stdout=self.command_results, changed=True)
|
||||
|
||||
def main():
|
||||
|
||||
# load ansible module object
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
state = dict(required=True, default=None, choices=ACTION_CHOICES),
|
||||
host=dict(required=True, default=None),
|
||||
backend=dict(required=False, default=None),
|
||||
weight=dict(required=False, default=None),
|
||||
socket = dict(required=False, default=DEFAULT_SOCKET_LOCATION),
|
||||
shutdown_sessions=dict(required=False, default=False),
|
||||
),
|
||||
|
||||
)
|
||||
|
||||
if not socket:
|
||||
module.fail_json(msg="unable to locate haproxy socket")
|
||||
|
||||
ansible_haproxy = HAProxy(module)  # HAProxy.__init__ reads its settings from module.params
|
||||
ansible_haproxy.act()
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
||||
main()
|
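HAProxy.execute() is the whole transport: it opens the admin stats socket, writes one semicolon-separated command line, and reads the reply until the socket drains. A stripped-down equivalent of what the disabled state sends (socket path, backend and server names are examples) is:

import socket

s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect('/var/run/haproxy.sock')
s.sendall('get weight www/web1 ; disable server www/web1\n')
reply = ''
buf = s.recv(1024)
while buf:
    reply += buf
    buf = s.recv(1024)
s.close()
print reply.strip()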
366
network/snmp_facts.py
Executable file

@@ -0,0 +1,366 @@
#!/usr/bin/python
|
||||
|
||||
# This file is part of Networklore's snmp library for Ansible
|
||||
#
|
||||
# The module is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# The module is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: snmp_facts
|
||||
version_added: "1.9"
|
||||
author: Patrick Ogenstad (@networklore)
|
||||
short_description: Retrieve facts for a device using SNMP.
|
||||
description:
|
||||
- Retrieve facts for a device using SNMP; the facts will be
inserted into the ansible_facts key.
|
||||
requirements:
|
||||
- pysnmp
|
||||
options:
|
||||
host:
|
||||
description:
|
||||
- Set to target snmp server (normally {{inventory_hostname}})
|
||||
required: true
|
||||
version:
|
||||
description:
|
||||
- SNMP Version to use, v2/v2c or v3
|
||||
choices: [ 'v2', 'v2c', 'v3' ]
|
||||
required: true
|
||||
community:
|
||||
description:
|
||||
- The SNMP community string, required if version is v2/v2c
|
||||
required: false
|
||||
level:
|
||||
description:
|
||||
- Authentication level, required if version is v3
|
||||
choices: [ 'authPriv', 'authNoPriv' ]
|
||||
required: false
|
||||
username:
|
||||
description:
|
||||
- Username for SNMPv3, required if version is v3
|
||||
required: false
|
||||
integrity:
|
||||
description:
|
||||
- Hashing algorithm, required if version is v3
|
||||
choices: [ 'md5', 'sha' ]
|
||||
required: false
|
||||
authkey:
|
||||
description:
|
||||
- Authentication key, required if version is v3
|
||||
required: false
|
||||
privacy:
|
||||
description:
|
||||
- Encryption algorithm, required if level is authPriv
|
||||
choices: [ 'des', 'aes' ]
|
||||
required: false
|
||||
privkey:
|
||||
description:
|
||||
- Encryption key, required if level is authPriv
|
||||
required: false
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Gather facts with SNMP version 2
|
||||
- snmp_facts: host={{ inventory_hostname }} version=v2c community=public
|
||||
connection: local
|
||||
|
||||
# Gather facts using SNMP version 3
|
||||
- snmp_facts:
|
||||
host={{ inventory_hostname }}
|
||||
version=v3
|
||||
level=authPriv
|
||||
integrity=sha
|
||||
privacy=aes
|
||||
username=snmp-user
|
||||
authkey=abc12345
|
||||
privkey=def6789
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from collections import defaultdict
|
||||
|
||||
try:
|
||||
from pysnmp.entity.rfc3413.oneliner import cmdgen
|
||||
has_pysnmp = True
|
||||
except:
|
||||
has_pysnmp = False
|
||||
|
||||
class DefineOid(object):
|
||||
|
||||
def __init__(self,dotprefix=False):
|
||||
if dotprefix:
|
||||
dp = "."
|
||||
else:
|
||||
dp = ""
|
||||
|
||||
# From SNMPv2-MIB
|
||||
self.sysDescr = dp + "1.3.6.1.2.1.1.1.0"
|
||||
self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0"
|
||||
self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0"
|
||||
self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
|
||||
self.sysName = dp + "1.3.6.1.2.1.1.5.0"
|
||||
self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"
|
||||
|
||||
# From IF-MIB
|
||||
self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
|
||||
self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
|
||||
self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4"
|
||||
self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5"
|
||||
self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6"
|
||||
self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7"
|
||||
self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8"
|
||||
self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18"
|
||||
|
||||
# From IP-MIB
|
||||
self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
|
||||
self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
|
||||
self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"
|
||||
|
||||
|
||||
def decode_hex(hexstring):
|
||||
|
||||
if len(hexstring) < 3:
|
||||
return hexstring
|
||||
if hexstring[:2] == "0x":
|
||||
return hexstring[2:].decode("hex")
|
||||
else:
|
||||
return hexstring
|
||||
|
||||
def decode_mac(hexstring):
|
||||
|
||||
if len(hexstring) != 14:
|
||||
return hexstring
|
||||
if hexstring[:2] == "0x":
|
||||
return hexstring[2:]
|
||||
else:
|
||||
return hexstring
|
||||
|
||||
def lookup_adminstatus(int_adminstatus):
|
||||
adminstatus_options = {
|
||||
1: 'up',
|
||||
2: 'down',
|
||||
3: 'testing'
|
||||
}
|
||||
if int_adminstatus in adminstatus_options.keys():
|
||||
return adminstatus_options[int_adminstatus]
|
||||
else:
|
||||
return ""
|
||||
|
||||
def lookup_operstatus(int_operstatus):
|
||||
operstatus_options = {
|
||||
1: 'up',
|
||||
2: 'down',
|
||||
3: 'testing',
|
||||
4: 'unknown',
|
||||
5: 'dormant',
|
||||
6: 'notPresent',
|
||||
7: 'lowerLayerDown'
|
||||
}
|
||||
if int_operstatus in operstatus_options.keys():
|
||||
return operstatus_options[int_operstatus]
|
||||
else:
|
||||
return ""
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
host=dict(required=True),
|
||||
version=dict(required=True, choices=['v2', 'v2c', 'v3']),
|
||||
community=dict(required=False, default=False),
|
||||
username=dict(required=False),
|
||||
level=dict(required=False, choices=['authNoPriv', 'authPriv']),
|
||||
integrity=dict(required=False, choices=['md5', 'sha']),
|
||||
privacy=dict(required=False, choices=['des', 'aes']),
|
||||
authkey=dict(required=False),
|
||||
privkey=dict(required=False),
|
||||
removeplaceholder=dict(required=False)),
|
||||
required_together = ( ['username','level','integrity','authkey'],['privacy','privkey'],),
|
||||
supports_check_mode=False)
|
||||
|
||||
m_args = module.params
|
||||
|
||||
if not has_pysnmp:
|
||||
module.fail_json(msg='Missing required pysnmp module (check docs)')
|
||||
|
||||
cmdGen = cmdgen.CommandGenerator()
|
||||
|
||||
# Verify that we receive a community when using snmp v2
|
||||
if m_args['version'] == "v2" or m_args['version'] == "v2c":
|
||||
if m_args['community'] == False:
|
||||
module.fail_json(msg='Community not set when using snmp version 2')
|
||||
|
||||
if m_args['version'] == "v3":
|
||||
if m_args['username'] == None:
|
||||
module.fail_json(msg='Username not set when using snmp version 3')
|
||||
|
||||
if m_args['level'] == "authPriv" and m_args['privacy'] == None:
|
||||
module.fail_json(msg='Privacy algorithm not set when using authPriv')
|
||||
|
||||
|
||||
if m_args['integrity'] == "sha":
|
||||
integrity_proto = cmdgen.usmHMACSHAAuthProtocol
|
||||
elif m_args['integrity'] == "md5":
|
||||
integrity_proto = cmdgen.usmHMACMD5AuthProtocol
|
||||
|
||||
if m_args['privacy'] == "aes":
|
||||
privacy_proto = cmdgen.usmAesCfb128Protocol
|
||||
elif m_args['privacy'] == "des":
|
||||
privacy_proto = cmdgen.usmDESPrivProtocol
|
||||
|
||||
# Use SNMP Version 2
|
||||
if m_args['version'] == "v2" or m_args['version'] == "v2c":
|
||||
snmp_auth = cmdgen.CommunityData(m_args['community'])
|
||||
|
||||
# Use SNMP Version 3 with authNoPriv
|
||||
elif m_args['level'] == "authNoPriv":
|
||||
snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto)
|
||||
|
||||
# Use SNMP Version 3 with authPriv
|
||||
else:
|
||||
snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto, privProtocol=privacy_proto)
|
||||
|
||||
# Use p to prefix OIDs with a dot for polling
|
||||
p = DefineOid(dotprefix=True)
|
||||
# Use v without a prefix to use with return values
|
||||
v = DefineOid(dotprefix=False)
|
||||
|
||||
Tree = lambda: defaultdict(Tree)
|
||||
|
||||
results = Tree()
|
||||
|
||||
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
|
||||
snmp_auth,
|
||||
cmdgen.UdpTransportTarget((m_args['host'], 161)),
|
||||
cmdgen.MibVariable(p.sysDescr,),
|
||||
cmdgen.MibVariable(p.sysObjectId,),
|
||||
cmdgen.MibVariable(p.sysUpTime,),
|
||||
cmdgen.MibVariable(p.sysContact,),
|
||||
cmdgen.MibVariable(p.sysName,),
|
||||
cmdgen.MibVariable(p.sysLocation,),
|
||||
)
|
||||
|
||||
|
||||
if errorIndication:
|
||||
module.fail_json(msg=str(errorIndication))
|
||||
|
||||
for oid, val in varBinds:
|
||||
current_oid = oid.prettyPrint()
|
||||
current_val = val.prettyPrint()
|
||||
if current_oid == v.sysDescr:
|
||||
results['ansible_sysdescr'] = decode_hex(current_val)
|
||||
elif current_oid == v.sysObjectId:
|
||||
results['ansible_sysobjectid'] = current_val
|
||||
elif current_oid == v.sysUpTime:
|
||||
results['ansible_sysuptime'] = current_val
|
||||
elif current_oid == v.sysContact:
|
||||
results['ansible_syscontact'] = current_val
|
||||
elif current_oid == v.sysName:
|
||||
results['ansible_sysname'] = current_val
|
||||
elif current_oid == v.sysLocation:
|
||||
results['ansible_syslocation'] = current_val
|
||||
|
||||
errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
|
||||
snmp_auth,
|
||||
cmdgen.UdpTransportTarget((m_args['host'], 161)),
|
||||
cmdgen.MibVariable(p.ifIndex,),
|
||||
cmdgen.MibVariable(p.ifDescr,),
|
||||
cmdgen.MibVariable(p.ifMtu,),
|
||||
cmdgen.MibVariable(p.ifSpeed,),
|
||||
cmdgen.MibVariable(p.ifPhysAddress,),
|
||||
cmdgen.MibVariable(p.ifAdminStatus,),
|
||||
cmdgen.MibVariable(p.ifOperStatus,),
|
||||
cmdgen.MibVariable(p.ipAdEntAddr,),
|
||||
cmdgen.MibVariable(p.ipAdEntIfIndex,),
|
||||
cmdgen.MibVariable(p.ipAdEntNetMask,),
|
||||
|
||||
cmdgen.MibVariable(p.ifAlias,),
|
||||
)
|
||||
|
||||
|
||||
if errorIndication:
|
||||
module.fail_json(msg=str(errorIndication))
|
||||
|
||||
interface_indexes = []
|
||||
|
||||
all_ipv4_addresses = []
|
||||
ipv4_networks = Tree()
|
||||
|
||||
for varBinds in varTable:
|
||||
for oid, val in varBinds:
|
||||
current_oid = oid.prettyPrint()
|
||||
current_val = val.prettyPrint()
|
||||
if v.ifIndex in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['ifindex'] = current_val
|
||||
interface_indexes.append(ifIndex)
|
||||
if v.ifDescr in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['name'] = current_val
|
||||
if v.ifMtu in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['mtu'] = current_val
|
||||
if v.ifSpeed in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['speed'] = current_val
|
||||
if v.ifPhysAddress in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val)
|
||||
if v.ifAdminStatus in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val))
|
||||
if v.ifOperStatus in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val))
|
||||
if v.ipAdEntAddr in current_oid:
|
||||
curIPList = current_oid.rsplit('.', 4)[-4:]
|
||||
curIP = ".".join(curIPList)
|
||||
ipv4_networks[curIP]['address'] = current_val
|
||||
all_ipv4_addresses.append(current_val)
|
||||
if v.ipAdEntIfIndex in current_oid:
|
||||
curIPList = current_oid.rsplit('.', 4)[-4:]
|
||||
curIP = ".".join(curIPList)
|
||||
ipv4_networks[curIP]['interface'] = current_val
|
||||
if v.ipAdEntNetMask in current_oid:
|
||||
curIPList = current_oid.rsplit('.', 4)[-4:]
|
||||
curIP = ".".join(curIPList)
|
||||
ipv4_networks[curIP]['netmask'] = current_val
|
||||
|
||||
if v.ifAlias in current_oid:
|
||||
ifIndex = int(current_oid.rsplit('.', 1)[-1])
|
||||
results['ansible_interfaces'][ifIndex]['description'] = current_val
|
||||
|
||||
interface_to_ipv4 = {}
|
||||
for ipv4_network in ipv4_networks:
|
||||
current_interface = ipv4_networks[ipv4_network]['interface']
|
||||
current_network = {
|
||||
'address': ipv4_networks[ipv4_network]['address'],
|
||||
'netmask': ipv4_networks[ipv4_network]['netmask']
|
||||
}
|
||||
if not current_interface in interface_to_ipv4:
|
||||
interface_to_ipv4[current_interface] = []
|
||||
interface_to_ipv4[current_interface].append(current_network)
|
||||
else:
|
||||
interface_to_ipv4[current_interface].append(current_network)
|
||||
|
||||
for interface in interface_to_ipv4:
|
||||
results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]
|
||||
|
||||
results['ansible_all_ipv4_addresses'] = all_ipv4_addresses
|
||||
|
||||
module.exit_json(ansible_facts=results)
|
||||
|
||||
|
||||
main()
|
||||
|
|
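Everything above is built on pysnmp's one-liner command generator: getCmd() for the scalar system OIDs and nextCmd() to walk the interface and address tables. A cut-down sysDescr query equivalent to the first getCmd() call (SNMP v2c, community 'public', host address is a placeholder) is:

from pysnmp.entity.rfc3413.oneliner import cmdgen

cmdGen = cmdgen.CommandGenerator()
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
    cmdgen.CommunityData('public'),
    cmdgen.UdpTransportTarget(('192.0.2.10', 161)),
    cmdgen.MibVariable('.1.3.6.1.2.1.1.1.0'),   # sysDescr
)
if errorIndication:
    print errorIndication
else:
    for oid, val in varBinds:
        print oid.prettyPrint(), '=', val.prettyPrint()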
@@ -96,4 +96,5 @@ def main():
|
|||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
main()
|
||||
|
|
|
@@ -105,8 +105,7 @@ EXAMPLES = """
|
|||
|
||||
"""
|
||||
|
||||
|
||||
SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
|
||||
SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
|
||||
|
||||
def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse):
|
||||
payload = dict(text=text)
|
||||
|
@@ -128,11 +127,11 @@ def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse):
|
|||
return payload
|
||||
|
||||
def do_notify_slack(module, domain, token, payload):
|
||||
slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, token)
|
||||
slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (token)
|
||||
|
||||
response, info = fetch_url(module, slack_incoming_webhook, data=payload)
|
||||
if info['status'] != 200:
|
||||
obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, '[obscured]')
|
||||
obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]')
|
||||
module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg']))
|
||||
|
||||
def main():
|
||||
|
@@ -170,4 +169,4 @@ def main():
|
|||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
main()
|
||||
main()
|
||||
|
|
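The slack change above drops the separate domain argument: the incoming-webhook path now carries the whole token, so the URL is built from the token alone. Roughly, the module now does the equivalent of the following (the token shown is a placeholder in the new three-part form):

SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
token = 'T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX'
slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (token)
# the payload is then posted with fetch_url(module, slack_incoming_webhook, data=payload)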
187
packaging/bower.py
Normal file

@@ -0,0 +1,187 @@
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2014, Michael Warkentin <mwarkentin@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: bower
|
||||
short_description: Manage bower packages with bower
|
||||
description:
|
||||
- Manage bower packages with bower
|
||||
version_added: 1.7
|
||||
author: Michael Warkentin
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of a bower package to install
|
||||
required: false
|
||||
offline:
|
||||
description:
|
||||
- Install packages from local cache, if the packages were installed before
|
||||
required: false
|
||||
default: no
|
||||
choices: [ "yes", "no" ]
|
||||
path:
|
||||
description:
|
||||
- The base path where to install the bower packages
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- The state of the bower package
|
||||
required: false
|
||||
default: present
|
||||
choices: [ "present", "absent", "latest" ]
|
||||
version:
|
||||
description:
|
||||
- The version to be installed
|
||||
required: false
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
description: Install "bootstrap" bower package.
|
||||
- bower: name=bootstrap
|
||||
|
||||
description: Install "bootstrap" bower package on version 3.1.1.
|
||||
- bower: name=bootstrap version=3.1.1
|
||||
|
||||
description: Remove the "bootstrap" bower package.
|
||||
- bower: name=bootstrap state=absent
|
||||
|
||||
description: Install packages based on bower.json.
|
||||
- bower: path=/app/location
|
||||
|
||||
description: Update packages based on bower.json to their latest version.
|
||||
- bower: path=/app/location state=latest
|
||||
'''
|
||||
|
||||
|
||||
class Bower(object):
|
||||
def __init__(self, module, **kwargs):
|
||||
self.module = module
|
||||
self.name = kwargs['name']
|
||||
self.offline = kwargs['offline']
|
||||
self.path = kwargs['path']
|
||||
self.version = kwargs['version']
|
||||
|
||||
if kwargs['version']:
|
||||
self.name_version = self.name + '#' + self.version
|
||||
else:
|
||||
self.name_version = self.name
|
||||
|
||||
def _exec(self, args, run_in_check_mode=False, check_rc=True):
|
||||
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
|
||||
cmd = ["bower"] + args
|
||||
|
||||
if self.name:
|
||||
cmd.append(self.name_version)
|
||||
|
||||
if self.offline:
|
||||
cmd.append('--offline')
|
||||
|
||||
# If path is specified, cd into that path and run the command.
|
||||
cwd = None
|
||||
if self.path:
|
||||
if not os.path.exists(self.path):
|
||||
os.makedirs(self.path)
|
||||
if not os.path.isdir(self.path):
|
||||
self.module.fail_json(msg="path %s is not a directory" % self.path)
|
||||
cwd = self.path
|
||||
|
||||
rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
|
||||
return out
|
||||
return ''
|
||||
|
||||
def list(self):
|
||||
cmd = ['list', '--json']
|
||||
|
||||
installed = list()
|
||||
missing = list()
|
||||
outdated = list()
|
||||
data = json.loads(self._exec(cmd, True, False))
|
||||
if 'dependencies' in data:
|
||||
for dep in data['dependencies']:
|
||||
if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
|
||||
missing.append(dep)
|
||||
elif data['dependencies'][dep]['pkgMeta']['version'] != data['dependencies'][dep]['update']['latest']:
|
||||
outdated.append(dep)
|
||||
elif 'incompatible' in data['dependencies'][dep] and data['dependencies'][dep]['incompatible']:
|
||||
outdated.append(dep)
|
||||
else:
|
||||
installed.append(dep)
|
||||
# Named dependency not installed
|
||||
else:
|
||||
missing.append(self.name)
|
||||
|
||||
return installed, missing, outdated
|
||||
|
||||
def install(self):
|
||||
return self._exec(['install'])
|
||||
|
||||
def update(self):
|
||||
return self._exec(['update'])
|
||||
|
||||
def uninstall(self):
|
||||
return self._exec(['uninstall'])
|
||||
|
||||
|
||||
def main():
|
||||
arg_spec = dict(
|
||||
name=dict(default=None),
|
||||
offline=dict(default='no', type='bool'),
|
||||
path=dict(required=True),
|
||||
state=dict(default='present', choices=['present', 'absent', 'latest', ]),
|
||||
version=dict(default=None),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=arg_spec
|
||||
)
|
||||
|
||||
name = module.params['name']
|
||||
offline = module.params['offline']
|
||||
path = module.params['path']
|
||||
state = module.params['state']
|
||||
version = module.params['version']
|
||||
|
||||
if state == 'absent' and not name:
|
||||
module.fail_json(msg='uninstalling a package is only available for named packages')
|
||||
|
||||
bower = Bower(module, name=name, offline=offline, path=path, version=version)
|
||||
|
||||
changed = False
|
||||
if state == 'present':
|
||||
installed, missing, outdated = bower.list()
|
||||
if len(missing):
|
||||
changed = True
|
||||
bower.install()
|
||||
elif state == 'latest':
|
||||
installed, missing, outdated = bower.list()
|
||||
if len(missing) or len(outdated):
|
||||
changed = True
|
||||
bower.update()
|
||||
else: # Absent
|
||||
installed, missing, outdated = bower.list()
|
||||
if name in installed:
|
||||
changed = True
|
||||
bower.uninstall()
|
||||
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
# Import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
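Bower.list() decides whether each dependency is installed, missing, or outdated purely from the JSON that `bower list --json` prints, using the pkgMeta.version, update.latest, missing, and incompatible fields read above. A small illustration of those checks on a hand-written fragment of that JSON is:

import json

data = json.loads('{"dependencies": {'
    '"bootstrap": {"pkgMeta": {"version": "3.1.0"}, "update": {"latest": "3.1.1"}},'
    '"jquery": {"missing": true}}}')
for dep, info in data['dependencies'].items():
    if info.get('missing'):
        print dep, '-> missing'
    elif info['pkgMeta']['version'] != info['update']['latest']:
        print dep, '-> outdated'
    else:
        print dep, '-> installed'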
841
packaging/dnf.py
Normal file

@@ -0,0 +1,841 @@
#!/usr/bin/python -tt
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Written by Cristian van Ee <cristian at cvee.org>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
|
||||
import traceback
|
||||
import os
|
||||
import dnf
|
||||
|
||||
try:
|
||||
from dnf import find_unfinished_transactions, find_ts_remaining
|
||||
from rpmUtils.miscutils import splitFilename
|
||||
transaction_helpers = True
|
||||
except:
|
||||
transaction_helpers = False
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: dnf
|
||||
version_added: historical
|
||||
short_description: Manages packages with the I(dnf) package manager
|
||||
description:
|
||||
- Installs, upgrades, removes, and lists packages and groups with the I(dnf) package manager.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: dnf -y update. You can also pass a url or a local path to a rpm file."
|
||||
required: true
|
||||
version_added: "1.8"
|
||||
default: null
|
||||
aliases: []
|
||||
list:
|
||||
description:
|
||||
- Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
|
||||
required: false
|
||||
version_added: "1.8"
|
||||
default: null
|
||||
state:
|
||||
description:
|
||||
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
|
||||
required: false
|
||||
choices: [ "present", "latest", "absent" ]
|
||||
version_added: "1.8"
|
||||
default: "present"
|
||||
enablerepo:
|
||||
description:
|
||||
- I(Repoid) of repositories to enable for the install/update operation.
|
||||
These repos will not persist beyond the transaction.
|
||||
When specifying multiple repos, separate them with a ",".
|
||||
required: false
|
||||
version_added: "1.8"
|
||||
default: null
|
||||
aliases: []
|
||||
|
||||
disablerepo:
|
||||
description:
|
||||
- I(Repoid) of repositories to disable for the install/update operation.
|
||||
These repos will not persist beyond the transaction.
|
||||
When specifying multiple repos, separate them with a ",".
|
||||
required: false
|
||||
version_added: "1.8"
|
||||
default: null
|
||||
aliases: []
|
||||
|
||||
conf_file:
|
||||
description:
|
||||
- The remote dnf configuration file to use for the transaction.
|
||||
required: false
|
||||
version_added: "1.8"
|
||||
default: null
|
||||
aliases: []
|
||||
|
||||
disable_gpg_check:
|
||||
description:
|
||||
- Whether to disable the GPG checking of signatures of packages being
|
||||
installed. Has an effect only if state is I(present) or I(latest).
|
||||
required: false
|
||||
version_added: "1.8"
|
||||
default: "no"
|
||||
choices: ["yes", "no"]
|
||||
aliases: []
|
||||
|
||||
notes: []
|
||||
# informational: requirements for nodes
|
||||
requirements: [ dnf ]
|
||||
author: Cristian van Ee
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: install the latest version of Apache
|
||||
dnf: name=httpd state=latest
|
||||
|
||||
- name: remove the Apache package
|
||||
dnf: name=httpd state=absent
|
||||
|
||||
- name: install the latest version of Apache from the testing repo
|
||||
dnf: name=httpd enablerepo=testing state=present
|
||||
|
||||
- name: upgrade all packages
|
||||
dnf: name=* state=latest
|
||||
|
||||
- name: install the nginx rpm from a remote repo
|
||||
dnf: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
|
||||
|
||||
- name: install nginx rpm from a local file
|
||||
dnf: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
|
||||
|
||||
- name: install the 'Development tools' package group
|
||||
dnf: name="@Development tools" state=present
|
||||
|
||||
'''
|
||||
|
||||
def_qf = "%{name}-%{version}-%{release}.%{arch}"
|
||||
|
||||
repoquery='/usr/bin/repoquery'
|
||||
if not os.path.exists(repoquery):
|
||||
repoquery = None
|
||||
|
||||
dnfbin='/usr/bin/dnf'
|
||||
|
||||
import syslog
|
||||
|
||||
def log(msg):
|
||||
syslog.openlog('ansible-dnf', 0, syslog.LOG_USER)
|
||||
syslog.syslog(syslog.LOG_NOTICE, msg)
|
||||
|
||||
def dnf_base(conf_file=None, cachedir=False):
|
||||
|
||||
my = dnf.Base()
|
||||
my.logging.verbose_level=0
|
||||
my.logging.verbose_level=0
|
||||
if conf_file and os.path.exists(conf_file):
|
||||
my.config = conf_file
|
||||
if cachedir or os.geteuid() != 0:
|
||||
if cachedir or os.geteuid() != 0:
|
||||
if hasattr(my, 'setCacheDir'):
|
||||
my.setCacheDir()
|
||||
else:
|
||||
cachedir = cachedir.dnf.Conf()
|
||||
my.repos.setCacheDir(cachedir)
|
||||
my.conf.cache = 0
|
||||
|
||||
return my
|
||||
|
||||
def install_dnf_utils(module):
|
||||
|
||||
if not module.check_mode:
|
||||
dnf_path = module.get_bin_path('dnf')
|
||||
if dnf_path:
|
||||
rc, so, se = module.run_command('%s -y install dnf-plugins-core' % dnf_path)
|
||||
if rc == 0:
|
||||
this_path = module.get_bin_path('repoquery')
|
||||
global repoquery
|
||||
repoquery = this_path
|
||||
|
||||
def po_to_nevra(po):
|
||||
|
||||
if hasattr(po, 'ui_nevra'):
|
||||
return po.ui_nevra
|
||||
else:
|
||||
return '%s-%s-%s.%s' % (po.name, po.version, po.release, po.arch)
|
||||
|
||||
def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[], is_pkg=False):
|
||||
|
||||
if not repoq:
|
||||
|
||||
pkgs = []
|
||||
try:
|
||||
my = dnf_base(conf_file)
|
||||
for rid in en_repos:
|
||||
my.repos.enableRepo(rid)
|
||||
for rid in dis_repos:
|
||||
my.repos.disableRepo(rid)
|
||||
|
||||
e,m,u = my.rpmdb.matchPackageNames([pkgspec])
|
||||
pkgs = e + m
|
||||
if not pkgs:
|
||||
pkgs.extend(my.returnInstalledPackagesByDep(pkgspec))
|
||||
except Exception, e:
|
||||
module.fail_json(msg="Failure talking to dnf: %s" % e)
|
||||
|
||||
return [ po_to_nevra(p) for p in pkgs ]
|
||||
|
||||
else:
|
||||
|
||||
cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, pkgspec]
|
||||
rc,out,err = module.run_command(cmd)
|
||||
if not is_pkg:
|
||||
cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, "--whatprovides", pkgspec]
|
||||
rc2,out2,err2 = module.run_command(cmd)
|
||||
else:
|
||||
rc2,out2,err2 = (0, '', '')
|
||||
|
||||
if rc == 0 and rc2 == 0:
|
||||
out += out2
|
||||
return [ p for p in out.split('\n') if p.strip() ]
|
||||
else:
|
||||
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
|
||||
|
||||
return []
|
||||
|
||||
def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
|
||||
|
||||
if not repoq:
|
||||
|
||||
pkgs = []
|
||||
try:
|
||||
my = dnf_base(conf_file)
|
||||
for rid in en_repos:
|
||||
my.repos.enableRepo(rid)
|
||||
for rid in dis_repos:
|
||||
my.repos.disableRepo(rid)
|
||||
|
||||
e,m,u = my.pkgSack.matchPackageNames([pkgspec])
|
||||
pkgs = e + m
|
||||
if not pkgs:
|
||||
pkgs.extend(my.returnPackagesByDep(pkgspec))
|
||||
except Exception, e:
|
||||
module.fail_json(msg="Failure talking to dnf: %s" % e)
|
||||
|
||||
return [ po_to_nevra(p) for p in pkgs ]
|
||||
|
||||
else:
|
||||
myrepoq = list(repoq)
|
||||
|
||||
for repoid in dis_repos:
|
||||
r_cmd = ['--disablerepo', repoid]
|
||||
myrepoq.extend(r_cmd)
|
||||
|
||||
for repoid in en_repos:
|
||||
r_cmd = ['--enablerepo', repoid]
|
||||
myrepoq.extend(r_cmd)
|
||||
|
||||
cmd = myrepoq + ["--qf", qf, pkgspec]
|
||||
rc,out,err = module.run_command(cmd)
|
||||
if rc == 0:
|
||||
return [ p for p in out.split('\n') if p.strip() ]
|
||||
else:
|
||||
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
|
||||
|
||||
|
||||
return []
|
||||
|
||||
def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
|
||||
|
||||
if not repoq:
|
||||
|
||||
retpkgs = []
|
||||
pkgs = []
|
||||
updates = []
|
||||
|
||||
try:
|
||||
my = dnf_base(conf_file)
|
||||
for rid in en_repos:
|
||||
my.repos.enableRepo(rid)
|
||||
for rid in dis_repos:
|
||||
my.repos.disableRepo(rid)
|
||||
|
||||
pkgs = my.returnPackagesByDep(pkgspec) + my.returnInstalledPackagesByDep(pkgspec)
|
||||
if not pkgs:
|
||||
e,m,u = my.pkgSack.matchPackageNames([pkgspec])
|
||||
pkgs = e + m
|
||||
updates = my.doPackageLists(pkgnarrow='updates').updates
|
||||
except Exception, e:
|
||||
module.fail_json(msg="Failure talking to dnf: %s" % e)
|
||||
|
||||
for pkg in pkgs:
|
||||
if pkg in updates:
|
||||
retpkgs.append(pkg)
|
||||
|
||||
return set([ po_to_nevra(p) for p in retpkgs ])
|
||||
|
||||
else:
|
||||
myrepoq = list(repoq)
|
||||
for repoid in dis_repos:
|
||||
r_cmd = ['--disablerepo', repoid]
|
||||
myrepoq.extend(r_cmd)
|
||||
|
||||
for repoid in en_repos:
|
||||
r_cmd = ['--enablerepo', repoid]
|
||||
myrepoq.extend(r_cmd)
|
||||
|
||||
cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
|
||||
rc,out,err = module.run_command(cmd)
|
||||
|
||||
if rc == 0:
|
||||
return set([ p for p in out.split('\n') if p.strip() ])
|
||||
else:
|
||||
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
|
||||
|
||||
return []
|
||||
|
||||
def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
|
||||
|
||||
if not repoq:
|
||||
|
||||
pkgs = []
|
||||
try:
|
||||
my = dnf_base(conf_file)
|
||||
for rid in en_repos:
|
||||
my.repos.enableRepo(rid)
|
||||
for rid in dis_repos:
|
||||
my.repos.disableRepo(rid)
|
||||
|
||||
pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec)
|
||||
if not pkgs:
|
||||
e,m,u = my.pkgSack.matchPackageNames([req_spec])
|
||||
pkgs.extend(e)
|
||||
pkgs.extend(m)
|
||||
e,m,u = my.rpmdb.matchPackageNames([req_spec])
|
||||
pkgs.extend(e)
|
||||
pkgs.extend(m)
|
||||
except Exception, e:
|
||||
module.fail_json(msg="Failure talking to dnf: %s" % e)
|
||||
|
||||
return set([ po_to_nevra(p) for p in pkgs ])
|
||||
|
||||
else:
|
||||
myrepoq = list(repoq)
|
||||
for repoid in dis_repos:
|
||||
r_cmd = ['--disablerepo', repoid]
|
||||
myrepoq.extend(r_cmd)
|
||||
|
||||
for repoid in en_repos:
|
||||
r_cmd = ['--enablerepo', repoid]
|
||||
myrepoq.extend(r_cmd)
|
||||
|
||||
cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec]
|
||||
rc,out,err = module.run_command(cmd)
|
||||
cmd = myrepoq + ["--qf", qf, req_spec]
|
||||
rc2,out2,err2 = module.run_command(cmd)
|
||||
if rc == 0 and rc2 == 0:
|
||||
out += out2
|
||||
pkgs = set([ p for p in out.split('\n') if p.strip() ])
|
||||
if not pkgs:
|
||||
pkgs = is_installed(module, repoq, req_spec, conf_file, qf=qf)
|
||||
return pkgs
|
||||
else:
|
||||
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
|
||||
|
||||
return []
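For reference, a minimal sketch of the repoquery invocation the else-branch above assembles when repo overrides are in play. The binary path, repo ids, package spec and the def_qf value shown here are illustrative stand-ins, not quoted from the module.

# Hypothetical values; def_qf stands in for the module's default query format.
def_qf = "%{name}-%{version}-%{release}.%{arch}"
repoq = ['/usr/bin/repoquery', '--show-duplicates', '--plugins', '--quiet', '-q']

myrepoq = list(repoq)
for repoid in ['updates-testing']:            # dis_repos (example)
    myrepoq.extend(['--disablerepo', repoid])
for repoid in ['epel']:                       # en_repos (example)
    myrepoq.extend(['--enablerepo', repoid])

cmd = myrepoq + ['--qf', def_qf, '--whatprovides', 'webserver']
print(' '.join(cmd))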
|
||||
|
||||
def transaction_exists(pkglist):
|
||||
"""
|
||||
checks the package list to see if any packages are
|
||||
involved in an incomplete transaction
|
||||
"""
|
||||
|
||||
conflicts = []
|
||||
if not transaction_helpers:
|
||||
return conflicts
|
||||
|
||||
# first, we create a list of the package 'nvreas'
|
||||
# so we can compare the pieces later more easily
|
||||
pkglist_nvreas = []
|
||||
for pkg in pkglist:
|
||||
pkglist_nvreas.append(splitFilename(pkg))
|
||||
|
||||
# next, we build the list of packages that are
|
||||
# contained within an unfinished transaction
|
||||
unfinished_transactions = find_unfinished_transactions()
|
||||
for trans in unfinished_transactions:
|
||||
steps = find_ts_remaining(trans)
|
||||
for step in steps:
|
||||
# the action is install/erase/etc., but we only
|
||||
# care about the package spec contained in the step
|
||||
(action, step_spec) = step
|
||||
(n,v,r,e,a) = splitFilename(step_spec)
|
||||
# and see if that spec is in the list of packages
|
||||
# requested for installation/updating
|
||||
for pkg in pkglist_nvreas:
|
||||
# if the name and arch match, we're going to assume
|
||||
# this package is part of a pending transaction
|
||||
# the label is just for display purposes
|
||||
label = "%s-%s" % (n,a)
|
||||
if n == pkg[0] and a == pkg[4]:
|
||||
if label not in conflicts:
|
||||
conflicts.append("%s-%s" % (n,a))
|
||||
break
|
||||
return conflicts
|
||||
|
||||
def local_nvra(module, path):
|
||||
"""return nvra of a local rpm passed in"""
|
||||
|
||||
cmd = ['/bin/rpm', '-qp' ,'--qf',
|
||||
'%{name}-%{version}-%{release}.%{arch}\n', path ]
|
||||
rc, out, err = module.run_command(cmd)
|
||||
if rc != 0:
|
||||
return None
|
||||
nvra = out.split('\n')[0]
|
||||
return nvra
|
||||
|
||||
def pkg_to_dict(pkgstr):
|
||||
|
||||
if pkgstr.strip():
|
||||
n,e,v,r,a,repo = pkgstr.split('|')
|
||||
else:
|
||||
return {'error_parsing': pkgstr}
|
||||
|
||||
d = {
|
||||
'name':n,
|
||||
'arch':a,
|
||||
'epoch':e,
|
||||
'release':r,
|
||||
'version':v,
|
||||
'repo':repo,
|
||||
'nevra': '%s:%s-%s-%s.%s' % (e,n,v,r,a)
|
||||
}
|
||||
|
||||
if repo == 'installed':
|
||||
d['dnfstate'] = 'installed'
|
||||
else:
|
||||
d['dnfstate'] = 'available'
|
||||
|
||||
return d
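A quick standalone illustration of the pipe-separated repoquery line format pkg_to_dict() expects; the package values are made up.

# One line of repoquery output in the qf order name|epoch|version|release|arch|repoid
line = "bash|0|4.3|42.fc21|x86_64|updates"
n, e, v, r, a, repo = line.split('|')
print('%s:%s-%s-%s.%s' % (e, n, v, r, a))              # 0:bash-4.3-42.fc21.x86_64
print('installed' if repo == 'installed' else 'available')  # available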
|
||||
|
||||
def repolist(module, repoq, qf="%{repoid}"):
|
||||
|
||||
cmd = repoq + ["--qf", qf, "-a"]
|
||||
rc,out,err = module.run_command(cmd)
|
||||
ret = []
|
||||
if rc == 0:
|
||||
ret = set([ p for p in out.split('\n') if p.strip() ])
|
||||
return ret
|
||||
|
||||
def list_stuff(module, conf_file, stuff):
|
||||
|
||||
qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
|
||||
repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q']
|
||||
if conf_file and os.path.exists(conf_file):
|
||||
repoq += ['-c', conf_file]
|
||||
|
||||
if stuff == 'installed':
|
||||
return [ pkg_to_dict(p) for p in is_installed(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
|
||||
elif stuff == 'updates':
|
||||
return [ pkg_to_dict(p) for p in is_update(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
|
||||
elif stuff == 'available':
|
||||
return [ pkg_to_dict(p) for p in is_available(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
|
||||
elif stuff == 'repos':
|
||||
return [ dict(repoid=name, state='enabled') for name in repolist(module, repoq) if name.strip() ]
|
||||
else:
|
||||
return [ pkg_to_dict(p) for p in is_installed(module, repoq, stuff, conf_file, qf=qf) + is_available(module, repoq, stuff, conf_file, qf=qf) if p.strip() ]
|
||||
|
||||
def install(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos):
|
||||
|
||||
res = {}
|
||||
res['results'] = []
|
||||
res['msg'] = ''
|
||||
res['rc'] = 0
|
||||
res['changed'] = False
|
||||
|
||||
for spec in items:
|
||||
pkg = None
|
||||
|
||||
# check if pkgspec is installed (if possible for idempotence)
|
||||
# localpkg
|
||||
if spec.endswith('.rpm') and '://' not in spec:
|
||||
# get the pkg name-v-r.arch
|
||||
if not os.path.exists(spec):
|
||||
res['msg'] += "No Package file matching '%s' found on system" % spec
|
||||
module.fail_json(**res)
|
||||
|
||||
nvra = local_nvra(module, spec)
|
||||
# look for them in the rpmdb
|
||||
if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos):
|
||||
# if they are there, skip it
|
||||
continue
|
||||
pkg = spec
|
||||
|
||||
# URL
|
||||
elif '://' in spec:
|
||||
pkg = spec
|
||||
|
||||
#groups :(
|
||||
elif spec.startswith('@'):
|
||||
# complete wild ass guess b/c it's a group
|
||||
pkg = spec
|
||||
|
||||
# range requires or file-requires or pkgname :(
|
||||
else:
|
||||
# most common case is the pkg is already installed and done
|
||||
# short circuit all the bs - and search for it as a pkg in is_installed
|
||||
# if you find it then we're done
|
||||
if not set(['*','?']).intersection(set(spec)):
|
||||
pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True)
|
||||
if pkgs:
|
||||
res['results'].append('%s providing %s is already installed' % (pkgs[0], spec))
|
||||
continue
|
||||
|
||||
# look up what pkgs provide this
|
||||
pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos)
|
||||
if not pkglist:
|
||||
res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
|
||||
module.fail_json(**res)
|
||||
|
||||
# if any of the packages are involved in a transaction, fail now
|
||||
# so that we don't hang on the dnf operation later
|
||||
conflicts = transaction_exists(pkglist)
|
||||
if len(conflicts) > 0:
|
||||
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
|
||||
module.fail_json(**res)
|
||||
|
||||
# if any of them are installed
|
||||
# then nothing to do
|
||||
|
||||
found = False
|
||||
for this in pkglist:
|
||||
if is_installed(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True):
|
||||
found = True
|
||||
res['results'].append('%s providing %s is already installed' % (this, spec))
|
||||
break
|
||||
|
||||
# if the version of the pkg you have installed is not in ANY repo, but there are
|
||||
# other versions in the repos (both higher and lower) then the previous checks won't work.
|
||||
# so we check one more time. This really only works for pkgname - not for file provides or virt provides
|
||||
# but virt provides should be all caught in what_provides on its own.
|
||||
# highly irritating
|
||||
if not found:
|
||||
if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos):
|
||||
found = True
|
||||
res['results'].append('package providing %s is already installed' % (spec))
|
||||
|
||||
if found:
|
||||
continue
|
||||
|
||||
# if not - then pass in the spec as what to install
|
||||
# we could get here if nothing provides it but that's not
|
||||
# the error we're catching here
|
||||
pkg = spec
|
||||
|
||||
cmd = dnf_basecmd + ['install', pkg]
|
||||
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True)
|
||||
|
||||
changed = True
|
||||
|
||||
rc, out, err = module.run_command(cmd)
|
||||
|
||||
# Fail on invalid urls:
|
||||
if (rc == 1 and '://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
|
||||
err = 'Package at %s could not be installed' % spec
|
||||
module.fail_json(changed=False,msg=err,rc=1)
|
||||
elif (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out:
|
||||
# avoid failing in the 'Nothing To Do' case
|
||||
# this may happen with a URL spec.
|
||||
# for an already installed group,
|
||||
# we get rc = 0 and 'Nothing to do' in out, not in err.
|
||||
rc = 0
|
||||
err = ''
|
||||
out = '%s: Nothing to do' % spec
|
||||
changed = False
|
||||
|
||||
res['rc'] += rc
|
||||
res['results'].append(out)
|
||||
res['msg'] += err
|
||||
|
||||
# FIXME - if we did an install - go and check the rpmdb to see if it actually installed
|
||||
# look for the pkg in rpmdb
|
||||
# look for the pkg via obsoletes
|
||||
|
||||
# accumulate any changes
|
||||
res['changed'] |= changed
|
||||
|
||||
module.exit_json(**res)
|
||||
|
||||
|
||||
def remove(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos):
|
||||
|
||||
res = {}
|
||||
res['results'] = []
|
||||
res['msg'] = ''
|
||||
res['changed'] = False
|
||||
res['rc'] = 0
|
||||
|
||||
for pkg in items:
|
||||
is_group = False
|
||||
# group remove - this is doom on a stick
|
||||
if pkg.startswith('@'):
|
||||
is_group = True
|
||||
else:
|
||||
if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos):
|
||||
res['results'].append('%s is not installed' % pkg)
|
||||
continue
|
||||
|
||||
# run an actual dnf transaction
|
||||
cmd = dnf_basecmd + ["remove", pkg]
|
||||
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True)
|
||||
|
||||
rc, out, err = module.run_command(cmd)
|
||||
|
||||
res['rc'] += rc
|
||||
res['results'].append(out)
|
||||
res['msg'] += err
|
||||
|
||||
# compile the results into one batch. If anything is changed
|
||||
# then mark changed
|
||||
# at the end - if we've end up failed then fail out of the rest
|
||||
# of the process
|
||||
|
||||
# at this point we should check to see if the pkg is no longer present
|
||||
|
||||
if not is_group: # we can't sensibly check for a group being uninstalled reliably
|
||||
# look to see if the pkg shows up from is_installed. If it doesn't
|
||||
if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos):
|
||||
res['changed'] = True
|
||||
else:
|
||||
module.fail_json(**res)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(**res)
|
||||
|
||||
module.exit_json(**res)
|
||||
|
||||
def latest(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos):
|
||||
|
||||
res = {}
|
||||
res['results'] = []
|
||||
res['msg'] = ''
|
||||
res['changed'] = False
|
||||
res['rc'] = 0
|
||||
|
||||
for spec in items:
|
||||
|
||||
pkg = None
|
||||
basecmd = 'update'
|
||||
cmd = ''
|
||||
# groups, again
|
||||
if spec.startswith('@'):
|
||||
pkg = spec
|
||||
|
||||
elif spec == '*': #update all
|
||||
# use check-update to see if there is any need
|
||||
rc,out,err = module.run_command(dnf_basecmd + ['check-update'])
|
||||
if rc == 100:
|
||||
cmd = dnf_basecmd + [basecmd]
|
||||
else:
|
||||
res['results'].append('All packages up to date')
|
||||
continue
|
||||
|
||||
# dep/pkgname - find it
|
||||
else:
|
||||
if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos):
|
||||
basecmd = 'update'
|
||||
else:
|
||||
basecmd = 'install'
|
||||
|
||||
pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos)
|
||||
if not pkglist:
|
||||
res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
|
||||
module.fail_json(**res)
|
||||
|
||||
nothing_to_do = True
|
||||
for this in pkglist:
|
||||
if basecmd == 'install' and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos):
|
||||
nothing_to_do = False
|
||||
break
|
||||
|
||||
if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos):
|
||||
nothing_to_do = False
|
||||
break
|
||||
|
||||
if nothing_to_do:
|
||||
res['results'].append("All packages providing %s are up to date" % spec)
|
||||
continue
|
||||
|
||||
# if any of the packages are involved in a transaction, fail now
|
||||
# so that we don't hang on the dnf operation later
|
||||
conflicts = transaction_exists(pkglist)
|
||||
if len(conflicts) > 0:
|
||||
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
|
||||
module.fail_json(**res)
|
||||
|
||||
pkg = spec
|
||||
if not cmd:
|
||||
cmd = dnf_basecmd + [basecmd, pkg]
|
||||
|
||||
if module.check_mode:
|
||||
return module.exit_json(changed=True)
|
||||
|
||||
rc, out, err = module.run_command(cmd)
|
||||
|
||||
res['rc'] += rc
|
||||
res['results'].append(out)
|
||||
res['msg'] += err
|
||||
|
||||
# FIXME if it is - update it and check to see if it applied
|
||||
# check to see if there is no longer an update available for the pkgspec
|
||||
|
||||
if rc:
|
||||
res['failed'] = True
|
||||
else:
|
||||
res['changed'] = True
|
||||
|
||||
module.exit_json(**res)
|
||||
|
||||
def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo,
|
||||
disable_gpg_check):
|
||||
|
||||
# take multiple args comma separated
|
||||
items = pkgspec.split(',')
|
||||
|
||||
# need debug level 2 to get 'Nothing to do' for groupinstall.
|
||||
dnf_basecmd = [dnfbin, '-d', '2', '-y']
|
||||
|
||||
|
||||
if not repoquery:
|
||||
repoq = None
|
||||
else:
|
||||
repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q']
|
||||
|
||||
if conf_file and os.path.exists(conf_file):
|
||||
dnf_basecmd += ['-c', conf_file]
|
||||
if repoq:
|
||||
repoq += ['-c', conf_file]
|
||||
|
||||
dis_repos =[]
|
||||
en_repos = []
|
||||
if disablerepo:
|
||||
dis_repos = disablerepo.split(',')
|
||||
if enablerepo:
|
||||
en_repos = enablerepo.split(',')
|
||||
|
||||
for repoid in dis_repos:
|
||||
r_cmd = ['--disablerepo=%s' % repoid]
|
||||
dnf_basecmd.extend(r_cmd)
|
||||
|
||||
for repoid in en_repos:
|
||||
r_cmd = ['--enablerepo=%s' % repoid]
|
||||
dnf_basecmd.extend(r_cmd)
|
||||
|
||||
if state in ['installed', 'present', 'latest']:
|
||||
my = dnf_base(conf_file)
|
||||
try:
|
||||
for r in dis_repos:
|
||||
my.repos.disableRepo(r)
|
||||
|
||||
current_repos = dnf.yum.config.RepoConf()
|
||||
for r in en_repos:
|
||||
try:
|
||||
my.repos.enableRepo(r)
|
||||
new_repos = my.repos.repos.keys()
|
||||
for i in new_repos:
|
||||
if not i in current_repos:
|
||||
rid = my.repos.getRepo(i)
|
||||
a = rid.repoXML.repoid
|
||||
current_repos = new_repos
|
||||
except dnf.exceptions.Error, e:
|
||||
module.fail_json(msg="Error setting/accessing repo %s: %s" % (r, e))
|
||||
except dnf.exceptions.Error, e:
|
||||
module.fail_json(msg="Error accessing repos: %s" % e)
|
||||
|
||||
if state in ['installed', 'present']:
|
||||
if disable_gpg_check:
|
||||
dnf_basecmd.append('--nogpgcheck')
|
||||
install(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos)
|
||||
elif state in ['removed', 'absent']:
|
||||
remove(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos)
|
||||
elif state == 'latest':
|
||||
if disable_gpg_check:
|
||||
dnf_basecmd.append('--nogpgcheck')
|
||||
latest(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos)
|
||||
|
||||
# should be caught by AnsibleModule argument_spec
|
||||
return dict(changed=False, failed=True, results='', errors='unexpected state')
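Roughly, the base command ensure() hands to install()/remove()/latest() ends up looking like the sketch below for state=present with a custom config file, repo overrides and GPG checks disabled. All paths and repo ids here are hypothetical.

# Illustrative expansion of dnf_basecmd built above (values are examples only).
dnfbin = '/usr/bin/dnf'
dnf_basecmd = [dnfbin, '-d', '2', '-y',
               '-c', '/etc/dnf/dnf.conf',
               '--disablerepo=updates-testing',
               '--enablerepo=epel',
               '--nogpgcheck']
print(' '.join(dnf_basecmd + ['install', 'nginx']))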
|
||||
|
||||
def main():
|
||||
|
||||
# state=installed name=pkgspec
|
||||
# state=removed name=pkgspec
|
||||
# state=latest name=pkgspec
|
||||
#
|
||||
# informational commands:
|
||||
# list=installed
|
||||
# list=updates
|
||||
# list=available
|
||||
# list=repos
|
||||
# list=pkgspec
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
name=dict(aliases=['pkg']),
|
||||
# removed==absent, installed==present, these are accepted as aliases
|
||||
state=dict(default='installed', choices=['absent','present','installed','removed','latest']),
|
||||
enablerepo=dict(),
|
||||
disablerepo=dict(),
|
||||
list=dict(),
|
||||
conf_file=dict(default=None),
|
||||
disable_gpg_check=dict(required=False, default="no", type='bool'),
|
||||
# this should not be needed, but exists as a failsafe
|
||||
install_repoquery=dict(required=False, default="yes", type='bool'),
|
||||
),
|
||||
required_one_of = [['name','list']],
|
||||
mutually_exclusive = [['name','list']],
|
||||
supports_check_mode = True
|
||||
)
|
||||
|
||||
# this should not be needed, but exists as a failsafe
|
||||
params = module.params
|
||||
if params['install_repoquery'] and not repoquery and not module.check_mode:
|
||||
install_dnf_utils(module)
|
||||
|
||||
if params['list']:
|
||||
if not repoquery:
|
||||
module.fail_json(msg="repoquery is required to use list= with this module. Please install the dnf-utils package.")
|
||||
results = dict(results=list_stuff(module, params['conf_file'], params['list']))
|
||||
module.exit_json(**results)
|
||||
|
||||
else:
|
||||
pkg = params['name']
|
||||
state = params['state']
|
||||
enablerepo = params.get('enablerepo', '')
|
||||
disablerepo = params.get('disablerepo', '')
|
||||
disable_gpg_check = params['disable_gpg_check']
|
||||
res = ensure(module, state, pkg, params['conf_file'], enablerepo,
|
||||
disablerepo, disable_gpg_check)
|
||||
module.fail_json(msg="we should never get here unless this all failed", **res)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
||||
|
|
@ -49,7 +49,7 @@ options:
|
|||
aliases: [ "prefer-source" ]
|
||||
prefer_dist:
|
||||
description:
|
||||
- Forces installation from package dist even for de versions ( see --prefer-dist )
|
||||
- Forces installation from package dist even for dev versions ( see --prefer-dist )
|
||||
required: false
|
||||
default: "no"
|
||||
choices: [ "yes", "no" ]
|
||||
|
|
|
@ -335,6 +335,7 @@ def cleanup_packages(module, packages):
|
|||
def run_emerge(module, packages, *args):
|
||||
args = list(args)
|
||||
|
||||
args.append('--ask=n')
|
||||
if module.check_mode:
|
||||
args.append('--pretend')
|
||||
|
||||
|
|
|
@ -148,11 +148,8 @@ def package_present(m, name, installed_state, disable_gpg_check, disable_recomme
|
|||
if len(packages) != 0:
|
||||
cmd = ['/usr/bin/zypper', '--non-interactive']
|
||||
# add global options before zypper command
|
||||
if disable_gpg_check and not old_zypper:
|
||||
cmd.append('--no-gpg-check')
|
||||
else:
|
||||
if disable_gpg_check:
|
||||
cmd.append('--no-gpg-checks')
|
||||
|
||||
cmd.extend(['install', '--auto-agree-with-licenses'])
|
||||
# add install parameter
|
||||
if disable_recommends and not old_zypper:
|
||||
|
@ -182,10 +179,16 @@ def package_latest(m, name, installed_state, disable_gpg_check, disable_recommen
|
|||
if not changed:
|
||||
pre_upgrade_versions = get_current_version(m, name)
|
||||
|
||||
cmd = ['/usr/bin/zypper', '--non-interactive']
|
||||
|
||||
if disable_gpg_check:
|
||||
cmd.append('--no-gpg-checks')
|
||||
|
||||
if old_zypper:
|
||||
cmd = ['/usr/bin/zypper', '--non-interactive', 'install', '--auto-agree-with-licenses']
|
||||
cmd.extend(['install', '--auto-agree-with-licenses'])
|
||||
else:
|
||||
cmd = ['/usr/bin/zypper', '--non-interactive', 'update', '--auto-agree-with-licenses']
|
||||
cmd.extend(['update', '--auto-agree-with-licenses'])
|
||||
|
||||
cmd.extend(name)
|
||||
rc, stdout, stderr = m.run_command(cmd, check_rc=False)
|
||||
|
||||
|
|
|
@ -57,6 +57,12 @@ options:
|
|||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
content_type:
|
||||
description:
|
||||
- Content type to use for requests made to the webhook
|
||||
required: false
|
||||
default: 'json'
|
||||
choices: ['json', 'form']
|
||||
|
||||
author: Phillip Gentry, CX Inc
|
||||
'''
|
||||
|
@ -69,7 +75,7 @@ EXAMPLES = '''
|
|||
- local_action: github_hooks action=cleanall user={{ gituser }} oauthkey={{ oauthkey }} repo={{ repo }}
|
||||
'''
|
||||
|
||||
def list(module, hookurl, oauthkey, repo, user):
|
||||
def _list(module, hookurl, oauthkey, repo, user):
|
||||
url = "%s/hooks" % repo
|
||||
auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
|
||||
headers = {
|
||||
|
@ -81,38 +87,38 @@ def list(module, hookurl, oauthkey, repo, user):
|
|||
else:
|
||||
return False, response.read()
|
||||
|
||||
def clean504(module, hookurl, oauthkey, repo, user):
|
||||
current_hooks = list(hookurl, oauthkey, repo, user)[1]
|
||||
def _clean504(module, hookurl, oauthkey, repo, user):
|
||||
current_hooks = _list(module, hookurl, oauthkey, repo, user)[1]
|
||||
decoded = json.loads(current_hooks)
|
||||
|
||||
for hook in decoded:
|
||||
if hook['last_response']['code'] == 504:
|
||||
# print "Last response was an ERROR for hook:"
|
||||
# print hook['id']
|
||||
delete(module, hookurl, oauthkey, repo, user, hook['id'])
|
||||
_delete(module, hookurl, oauthkey, repo, user, hook['id'])
|
||||
|
||||
return 0, current_hooks
|
||||
|
||||
def cleanall(module, hookurl, oauthkey, repo, user):
|
||||
current_hooks = list(hookurl, oauthkey, repo, user)[1]
|
||||
def _cleanall(module, hookurl, oauthkey, repo, user):
|
||||
current_hooks = _list(module, hookurl, oauthkey, repo, user)[1]
|
||||
decoded = json.loads(current_hooks)
|
||||
|
||||
for hook in decoded:
|
||||
if hook['last_response']['code'] != 200:
|
||||
# print "Last response was an ERROR for hook:"
|
||||
# print hook['id']
|
||||
delete(module, hookurl, oauthkey, repo, user, hook['id'])
|
||||
_delete(module, hookurl, oauthkey, repo, user, hook['id'])
|
||||
|
||||
return 0, current_hooks
|
||||
|
||||
def create(module, hookurl, oauthkey, repo, user):
|
||||
def _create(module, hookurl, oauthkey, repo, user, content_type):
|
||||
url = "%s/hooks" % repo
|
||||
values = {
|
||||
"active": True,
|
||||
"name": "web",
|
||||
"config": {
|
||||
"url": "%s" % hookurl,
|
||||
"content_type": "json"
|
||||
"content_type": "%s" % content_type
|
||||
}
|
||||
}
|
||||
data = json.dumps(values)
|
||||
|
@ -126,7 +132,7 @@ def create(module, hookurl, oauthkey, repo, user):
|
|||
else:
|
||||
return 0, response.read()
|
||||
|
||||
def delete(module, hookurl, oauthkey, repo, user, hookid):
|
||||
def _delete(module, hookurl, oauthkey, repo, user, hookid):
|
||||
url = "%s/hooks/%s" % (repo, hookid)
|
||||
auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
|
||||
headers = {
|
||||
|
@ -144,6 +150,7 @@ def main():
|
|||
repo=dict(required=True),
|
||||
user=dict(required=True),
|
||||
validate_certs=dict(default='yes', type='bool'),
|
||||
content_type=dict(default='json', choices=['json', 'form']),
|
||||
)
|
||||
)
|
||||
|
||||
|
@ -152,18 +159,19 @@ def main():
|
|||
oauthkey = module.params['oauthkey']
|
||||
repo = module.params['repo']
|
||||
user = module.params['user']
|
||||
content_type = module.params['content_type']
|
||||
|
||||
if action == "list":
|
||||
(rc, out) = list(module, hookurl, oauthkey, repo, user)
|
||||
(rc, out) = _list(module, hookurl, oauthkey, repo, user)
|
||||
|
||||
if action == "clean504":
|
||||
(rc, out) = clean504(module, hookurl, oauthkey, repo, user)
|
||||
(rc, out) = _clean504(module, hookurl, oauthkey, repo, user)
|
||||
|
||||
if action == "cleanall":
|
||||
(rc, out) = cleanall(module, hookurl, oauthkey, repo, user)
|
||||
(rc, out) = _cleanall(module, hookurl, oauthkey, repo, user)
|
||||
|
||||
if action == "create":
|
||||
(rc, out) = create(module, hookurl, oauthkey, repo, user)
|
||||
(rc, out) = _create(module, hookurl, oauthkey, repo, user, content_type)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="failed", result=out)
|
||||
|
|
|
@ -62,7 +62,8 @@ def main():
|
|||
name = dict(required=True),
|
||||
path = dict(required=True),
|
||||
link = dict(required=False),
|
||||
)
|
||||
),
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
params = module.params
|
||||
|
@ -74,13 +75,14 @@ def main():
|
|||
|
||||
current_path = None
|
||||
all_alternatives = []
|
||||
os_family = None
|
||||
|
||||
(rc, query_output, query_error) = module.run_command(
|
||||
[UPDATE_ALTERNATIVES, '--query', name]
|
||||
)
|
||||
|
||||
# Gather the current setting and all alternatives from the query output.
|
||||
# Query output should look something like this:
|
||||
# Query output should look something like this on Debian systems:
|
||||
|
||||
# Name: java
|
||||
# Link: /usr/bin/java
|
||||
|
@ -101,6 +103,7 @@ def main():
|
|||
# java.1.gz /usr/lib/jvm/java-7-openjdk-amd64/jre/man/man1/java.1.gz
|
||||
|
||||
if rc == 0:
|
||||
os_family = "Debian"
|
||||
for line in query_output.splitlines():
|
||||
split_line = line.split(':')
|
||||
if len(split_line) == 2:
|
||||
|
@ -112,11 +115,27 @@ def main():
|
|||
all_alternatives.append(value)
|
||||
elif key == 'Link' and not link:
|
||||
link = value
|
||||
elif rc == 2:
|
||||
os_family = "RedHat"
|
||||
# This is the version of update-alternatives that is shipped with
|
||||
# chkconfig on RedHat-based systems. Try again with the right options.
|
||||
(rc, query_output, query_error) = module.run_command(
|
||||
[UPDATE_ALTERNATIVES, '--list']
|
||||
)
|
||||
for line in query_output.splitlines():
|
||||
line_name, line_mode, line_path = line.strip().split("\t")
|
||||
if line_name != name:
|
||||
continue
|
||||
current_path = line_path
|
||||
break
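# Illustrative sketch (not part of the module): the RedHat branch above expects
# one tab-separated record per line from the chkconfig version of
# update-alternatives. Sample output below is invented.
query_output = ("java\tauto\t/usr/lib/jvm/jre-1.7.0-openjdk.x86_64/bin/java\n"
                "editor\tmanual\t/usr/bin/vim.basic")
name = 'java'
current_path = None
for line in query_output.splitlines():
    line_name, line_mode, line_path = line.strip().split("\t")
    if line_name != name:
        continue
    current_path = line_path
    break
print(current_path)   # /usr/lib/jvm/jre-1.7.0-openjdk.x86_64/bin/java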
|
||||
|
||||
if current_path != path:
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True, current_path=current_path)
|
||||
try:
|
||||
# install the requested path if necessary
|
||||
if path not in all_alternatives:
|
||||
# (unsupported on the RedHat version)
|
||||
if path not in all_alternatives and os_family == "Debian":
|
||||
module.run_command(
|
||||
[UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)],
|
||||
check_rc=True
|
||||
|
|
|
@ -78,7 +78,7 @@ import tempfile
|
|||
|
||||
|
||||
def add_job(module, result, at_cmd, count, units, command, script_file):
|
||||
at_command = "%s now + %s %s -f %s" % (at_cmd, count, units, script_file)
|
||||
at_command = "%s -f %s now + %s %s" % (at_cmd, script_file, count, units)
|
||||
rc, out, err = module.run_command(at_command, check_rc=True)
|
||||
if command:
|
||||
os.unlink(script_file)
|
||||
|
|
332
system/crypttab.py
Normal file
|
@ -0,0 +1,332 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2014, Steve <yo@groks.org>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: crypttab
|
||||
short_description: Encrypted Linux block devices
|
||||
description:
|
||||
- Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
|
||||
version_added: "1.8"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
|
||||
optionally prefixed with C(/dev/mapper), as it appears in the filesystem. I(/dev/mapper)
|
||||
will be stripped from I(name).
|
||||
required: true
|
||||
default: null
|
||||
aliases: []
|
||||
state:
|
||||
description:
|
||||
- Use I(present) to add a line to C(/etc/crypttab) or update its definition
|
||||
if already present. Use I(absent) to remove a line with matching I(name).
|
||||
Use I(opts_present) to add options to those already present; options with
|
||||
different values will be updated. Use I(opts_absent) to remove options from
|
||||
the existing set.
|
||||
required: true
|
||||
choices: [ "present", "absent", "opts_present", "opts_absent"]
|
||||
default: null
|
||||
backing_device:
|
||||
description:
|
||||
- Path to the underlying block device or file, or the UUID of a block-device
|
||||
prefixed with I(UUID=)
|
||||
required: false
|
||||
default: null
|
||||
password:
|
||||
description:
|
||||
- Encryption password, the path to a file containing the password, or
|
||||
'none' or '-' if the password should be entered at boot.
|
||||
required: false
|
||||
default: "none"
|
||||
opts:
|
||||
description:
|
||||
- A comma-delimited list of options. See C(crypttab(5)) for details.
|
||||
required: false
|
||||
path:
|
||||
description:
|
||||
- Path to file to use instead of C(/etc/crypttab). This might be useful
|
||||
in a chroot environment.
|
||||
required: false
|
||||
default: /etc/crypttab
|
||||
|
||||
notes: []
|
||||
requirements: []
|
||||
author: Steve <yo@groks.org>
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Set the options explicitly for a device which must already exist
|
||||
crypttab: name=luks-home state=present opts=discard,cipher=aes-cbc-essiv:sha256
|
||||
|
||||
- name: Add the 'discard' option to any existing options for all devices
|
||||
crypttab: name={{ item.device }} state=opts_present opts=discard
|
||||
with_items: ansible_mounts
|
||||
when: '/dev/mapper/luks-' in {{ item.device }}
|
||||
'''
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
name = dict(required=True),
|
||||
state = dict(required=True, choices=['present', 'absent', 'opts_present', 'opts_absent']),
|
||||
backing_device = dict(default=None),
|
||||
password = dict(default=None),
|
||||
opts = dict(default=None),
|
||||
path = dict(default='/etc/crypttab')
|
||||
),
|
||||
supports_check_mode = True
|
||||
)
|
||||
|
||||
name = module.params['name']
if name.startswith('/dev/mapper/'):
    name = name[len('/dev/mapper/'):]
|
||||
backing_device = module.params['backing_device']
|
||||
password = module.params['password']
|
||||
opts = module.params['opts']
|
||||
state = module.params['state']
|
||||
path = module.params['path']
|
||||
|
||||
if backing_device is None and password is None and opts is None:
|
||||
module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
|
||||
**module.params)
|
||||
|
||||
if 'opts' in state and (backing_device is not None or password is not None):
|
||||
module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state,
|
||||
**module.params)
|
||||
|
||||
for arg_name, arg in (('name', name),
|
||||
('backing_device', backing_device),
|
||||
('password', password),
|
||||
('opts', opts)):
|
||||
if (arg is not None
|
||||
and (' ' in arg or '\t' in arg or arg == '')):
|
||||
module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
|
||||
**module.params)
|
||||
|
||||
try:
|
||||
crypttab = Crypttab(path)
|
||||
existing_line = crypttab.match(name)
|
||||
except Exception, e:
|
||||
module.fail_json(msg="failed to open and parse crypttab file: %s" % e,
|
||||
**module.params)
|
||||
|
||||
if 'present' in state and existing_line is None and backing_device is None:
|
||||
module.fail_json(msg="'backing_device' required to add a new entry",
|
||||
**module.params)
|
||||
|
||||
changed, reason = False, '?'
|
||||
|
||||
if state == 'absent':
|
||||
if existing_line is not None:
|
||||
changed, reason = existing_line.remove()
|
||||
|
||||
elif state == 'present':
|
||||
if existing_line is not None:
|
||||
changed, reason = existing_line.set(backing_device, password, opts)
|
||||
else:
|
||||
changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
|
||||
|
||||
elif state == 'opts_present':
|
||||
if existing_line is not None:
|
||||
changed, reason = existing_line.opts.add(opts)
|
||||
else:
|
||||
changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
|
||||
|
||||
elif state == 'opts_absent':
|
||||
if existing_line is not None:
|
||||
changed, reason = existing_line.opts.remove(opts)
|
||||
|
||||
|
||||
if changed and not module.check_mode:
|
||||
with open(path, 'wb') as f:
|
||||
f.write(str(crypttab))
|
||||
|
||||
module.exit_json(changed=changed, msg=reason, **module.params)
|
||||
|
||||
|
||||
class Crypttab(object):
|
||||
|
||||
_lines = []
|
||||
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
if not os.path.exists(path):
|
||||
if not os.path.exists(os.path.dirname(path)):
|
||||
os.makedirs(os.path.dirname(path))
|
||||
open(path,'a').close()
|
||||
|
||||
with open(path, 'r') as f:
|
||||
for line in f.readlines():
|
||||
self._lines.append(Line(line))
|
||||
|
||||
def add(self, line):
|
||||
self._lines.append(line)
|
||||
return True, 'added line'
|
||||
|
||||
def lines(self):
|
||||
for line in self._lines:
|
||||
if line.valid():
|
||||
yield line
|
||||
|
||||
def match(self, name):
|
||||
for line in self.lines():
|
||||
if line.name == name:
|
||||
return line
|
||||
return None
|
||||
|
||||
def __str__(self):
|
||||
lines = []
|
||||
for line in self._lines:
|
||||
lines.append(str(line))
|
||||
crypttab = '\n'.join(lines)
|
||||
if crypttab[-1] != '\n':
|
||||
crypttab += '\n'
|
||||
return crypttab
|
||||
|
||||
|
||||
class Line(object):
|
||||
|
||||
def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None):
|
||||
self.line = line
|
||||
self.name = name
|
||||
self.backing_device = backing_device
|
||||
self.password = password
|
||||
self.opts = Options(opts)
|
||||
|
||||
if line is not None:
|
||||
if self._line_valid(line):
|
||||
self.name, backing_device, password, opts = self._split_line(line)
|
||||
|
||||
self.set(backing_device, password, opts)
|
||||
|
||||
def set(self, backing_device, password, opts):
|
||||
changed = False
|
||||
|
||||
if backing_device is not None and self.backing_device != backing_device:
|
||||
self.backing_device = backing_device
|
||||
changed = True
|
||||
|
||||
if password is not None and self.password != password:
|
||||
self.password = password
|
||||
changed = True
|
||||
|
||||
if opts is not None:
|
||||
opts = Options(opts)
|
||||
if opts != self.opts:
|
||||
self.opts = opts
|
||||
changed = True
|
||||
|
||||
return changed, 'updated line'
|
||||
|
||||
def _line_valid(self, line):
|
||||
if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4):
|
||||
return False
|
||||
return True
|
||||
|
||||
def _split_line(self, line):
|
||||
fields = line.split()
|
||||
return (fields[0],
|
||||
fields[1],
|
||||
fields[2] if len(fields) >= 3 else None,
|
||||
fields[3] if len(fields) >= 4 else None)
|
||||
|
||||
def remove(self):
|
||||
self.line, self.name, self.backing_device = '', None, None
|
||||
return True, 'removed line'
|
||||
|
||||
def valid(self):
|
||||
if self.name is not None and self.backing_device is not None:
|
||||
return True
|
||||
return False
|
||||
|
||||
def __str__(self):
|
||||
if self.valid():
|
||||
fields = [self.name, self.backing_device]
|
||||
if self.password is not None or self.opts:
|
||||
fields.append(self.password if self.password is not None else 'none')
|
||||
if self.opts:
|
||||
fields.append(str(self.opts))
|
||||
return ' '.join(fields)
|
||||
return self.line
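To make the field mapping concrete, here is a small standalone sketch of how a crypttab entry splits into the four columns Line tracks; the entry itself is an example, not taken from the module.

# name, backing device, optional password, optional comma-separated options
entry = 'luks-home UUID=0123-4567-89ab none discard,cipher=aes-cbc-essiv:sha256'
fields = entry.split()
name, backing_device = fields[0], fields[1]
password = fields[2] if len(fields) >= 3 else None
opts = fields[3] if len(fields) >= 4 else None
print(name, backing_device, password, opts)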
|
||||
|
||||
|
||||
class Options(dict):
|
||||
"""opts_string looks like: 'discard,foo=bar,baz=greeble' """
|
||||
|
||||
def __init__(self, opts_string):
|
||||
super(Options, self).__init__()
|
||||
self.itemlist = []
|
||||
if opts_string is not None:
|
||||
for opt in opts_string.split(','):
|
||||
kv = opt.split('=')
|
||||
k, v = (kv[0], kv[1]) if len(kv) > 1 else (kv[0], None)
|
||||
self[k] = v
|
||||
|
||||
def add(self, opts_string):
|
||||
changed = False
|
||||
for k, v in Options(opts_string).items():
|
||||
if self.has_key(k):
|
||||
if self[k] != v:
|
||||
changed = True
|
||||
else:
|
||||
changed = True
|
||||
self[k] = v
|
||||
return changed, 'updated options'
|
||||
|
||||
def remove(self, opts_string):
|
||||
changed = False
|
||||
for k in Options(opts_string):
|
||||
if self.has_key(k):
|
||||
del self[k]
|
||||
changed = True
|
||||
return changed, 'removed options'
|
||||
|
||||
def keys(self):
|
||||
return self.itemlist
|
||||
|
||||
def values(self):
|
||||
return [self[key] for key in self]
|
||||
|
||||
def items(self):
|
||||
return [(key, self[key]) for key in self]
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.itemlist)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
if not self.has_key(key):
|
||||
self.itemlist.append(key)
|
||||
super(Options, self).__setitem__(key, value)
|
||||
|
||||
def __delitem__(self, key):
|
||||
self.itemlist.remove(key)
|
||||
super(Options, self).__delitem__(key)
|
||||
|
||||
def __ne__(self, obj):
|
||||
return not (isinstance(obj, Options)
|
||||
and sorted(self.items()) == sorted(obj.items()))
|
||||
|
||||
def __str__(self):
|
||||
return ','.join([k if v is None else '%s=%s' % (k, v)
|
||||
for k, v in self.items()])
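A minimal standalone sketch of the option-string round-trip the Options class implements, using a plain dict for brevity; note the real class also preserves insertion order, which this sketch only guarantees on dict implementations that keep it.

def parse_opts(opts_string):
    # 'discard,foo=bar' -> {'discard': None, 'foo': 'bar'}
    opts = {}
    for opt in opts_string.split(','):
        kv = opt.split('=')
        opts[kv[0]] = kv[1] if len(kv) > 1 else None
    return opts

def format_opts(opts):
    return ','.join(k if v is None else '%s=%s' % (k, v) for k, v in opts.items())

opts = parse_opts('discard,cipher=aes-cbc-essiv:sha256')
opts.update(parse_opts('size=256'))        # roughly what state=opts_present adds
print(format_opts(opts))                   # discard,cipher=aes-cbc-essiv:sha256,size=256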
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
|
@ -34,6 +34,7 @@ notes:
|
|||
- A number of questions have to be answered (depending on the package).
|
||||
Use 'debconf-show <package>' on any Debian or derivative with the package
|
||||
installed to see questions/settings available.
|
||||
- Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
|
||||
requirements: [ debconf, debconf-utils ]
|
||||
options:
|
||||
name:
|
||||
|
|
|
@ -51,12 +51,16 @@ options:
|
|||
description:
|
||||
- "Should this configuration be in the running firewalld configuration or persist across reboots"
|
||||
required: true
|
||||
default: true
|
||||
immediate:
|
||||
description:
|
||||
- "Should this configuration be applied immediately, if set as permanent"
|
||||
required: false
|
||||
default: false
|
||||
version_added: "1.9"
|
||||
state:
|
||||
description:
|
||||
- "Should this port accept(enabled) or reject(disabled) connections"
|
||||
required: true
|
||||
default: enabled
|
||||
timeout:
|
||||
description:
|
||||
- "The amount of time the rule should be in effect for when non-permanent"
|
||||
|
@ -211,6 +215,7 @@ def main():
|
|||
rich_rule=dict(required=False,default=None),
|
||||
zone=dict(required=False,default=None),
|
||||
permanent=dict(type='bool',required=True),
|
||||
immediate=dict(type='bool',default=False),
|
||||
state=dict(choices=['enabled', 'disabled'], required=True),
|
||||
timeout=dict(type='int',required=False,default=0),
|
||||
),
|
||||
|
@ -241,6 +246,7 @@ def main():
|
|||
|
||||
permanent = module.params['permanent']
|
||||
desired_state = module.params['state']
|
||||
immediate = module.params['immediate']
|
||||
timeout = module.params['timeout']
|
||||
|
||||
## Check for firewalld running
|
||||
|
@ -281,7 +287,7 @@ def main():
|
|||
|
||||
set_service_disabled_permanent(zone, service)
|
||||
changed=True
|
||||
else:
|
||||
if immediate or not permanent:
|
||||
is_enabled = get_service_enabled(zone, service)
|
||||
msgs.append('Non-permanent operation')
|
||||
|
||||
|
@ -323,7 +329,7 @@ def main():
|
|||
|
||||
set_port_disabled_permanent(zone, port, protocol)
|
||||
changed=True
|
||||
else:
|
||||
if immediate or not permanent:
|
||||
is_enabled = get_port_enabled(zone, [port,protocol])
|
||||
msgs.append('Non-permanent operation')
|
||||
|
||||
|
@ -365,7 +371,7 @@ def main():
|
|||
|
||||
set_rich_rule_disabled_permanent(zone, rich_rule)
|
||||
changed=True
|
||||
else:
|
||||
if immediate or not permanent:
|
||||
is_enabled = get_rich_rule_enabled(zone, rich_rule)
|
||||
msgs.append('Non-permanent operation')
|
||||
|
||||
|
|
|
@ -26,7 +26,7 @@ module: getent
|
|||
short_description: a wrapper to the unix getent utility
|
||||
description:
|
||||
- Runs getent against one of its various databases and returns information into
|
||||
the host's facts
|
||||
the host's facts, in a getent_<database> prefixed variable
|
||||
version_added: "1.8"
|
||||
options:
|
||||
database:
|
||||
|
@ -51,7 +51,7 @@ options:
|
|||
description:
|
||||
- If True, the task will fail when the supplied key is missing
|
||||
|
||||
notes:
|
||||
notes:
|
||||
- "Not all databases support enumeration, check system documentation for details"
|
||||
requirements: [ ]
|
||||
author: Brian Coca
|
||||
|
@ -60,23 +60,23 @@ author: Brian Coca
|
|||
EXAMPLES = '''
|
||||
# get root user info
|
||||
- getent: database=passwd key=root
|
||||
register: root_info
|
||||
- debug: var=getent_passwd
|
||||
|
||||
# get all groups
|
||||
- getent: database=group split=':'
|
||||
register: groups
|
||||
- debug: var=getent_group
|
||||
|
||||
# get all hosts, split by tab
|
||||
- getent: database=hosts
|
||||
register: hosts
|
||||
- debug: var=getent_hosts
|
||||
|
||||
# get http service info, no error if missing
|
||||
- getent: database=services key=http fail_key=False
|
||||
register: http_info
|
||||
- debug: var=getent_services
|
||||
|
||||
# get user password hash (requires sudo/root)
|
||||
- getent: database=shadow key=www-data split=:
|
||||
register: pw_hash
|
||||
- debug: var=getent_shadow
|
||||
|
||||
'''
|
||||
|
||||
|
|
401
system/gluster_volume.py
Normal file
|
@ -0,0 +1,401 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2014, Taneli Leppä <taneli@crasman.fi>
|
||||
#
|
||||
# This file is part of Ansible (sort of)
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
DOCUMENTATION = """
|
||||
module: gluster_volume
|
||||
short_description: Manage GlusterFS volumes
|
||||
description:
|
||||
- Create, remove, start, stop and tune GlusterFS volumes
|
||||
options:
|
||||
name:
|
||||
required: true
|
||||
description:
|
||||
- The volume name
|
||||
state:
|
||||
required: true
|
||||
choices: [ 'present', 'absent', 'started', 'stopped' ]
|
||||
description:
|
||||
- Use present/absent to ensure whether a volume exists or not,
use started/stopped to control its availability.
|
||||
cluster:
|
||||
required: false
|
||||
description:
|
||||
- List of hosts to use for probing and brick setup
|
||||
host:
|
||||
required: false
|
||||
description:
|
||||
- Override local hostname (for peer probing purposes)
|
||||
replicas:
|
||||
required: false
|
||||
description:
|
||||
- Replica count for volume
|
||||
stripes:
|
||||
required: false
|
||||
description:
|
||||
- Stripe count for volume
|
||||
transport:
|
||||
required: false
|
||||
choices: [ 'tcp', 'rdma', 'tcp,rdma' ]
|
||||
description:
|
||||
- Transport type for volume
|
||||
brick:
|
||||
required: false
|
||||
description:
|
||||
- Brick path on servers
|
||||
start_on_create:
|
||||
choices: [ 'yes', 'no']
|
||||
required: false
|
||||
description:
|
||||
- Controls whether the volume is started after creation or not, defaults to yes
|
||||
rebalance:
|
||||
choices: [ 'yes', 'no']
|
||||
required: false
|
||||
description:
|
||||
- Controls whether the cluster is rebalanced after changes
|
||||
directory:
|
||||
required: false
|
||||
description:
|
||||
- Directory for limit-usage
|
||||
options:
|
||||
required: false
|
||||
description:
|
||||
- A dictionary/hash with options/settings for the volume
|
||||
quota:
|
||||
required: false
|
||||
description:
|
||||
- Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list)
|
||||
notes:
|
||||
- "Requires cli tools for GlusterFS on servers"
|
||||
- "Will add new bricks, but not remove them"
|
||||
author: Taneli Leppä
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
- name: create gluster volume
|
||||
gluster_volume: state=present name=test1 brick=/bricks/brick1/g1 rebalance=yes cluster="{{ play_hosts }}"
|
||||
run_once: true
|
||||
|
||||
- name: tune
|
||||
gluster_volume: state=present name=test1 options='{performance.cache-size: 256MB}'
|
||||
|
||||
- name: start gluster volume
|
||||
gluster_volume: state=started name=test1
|
||||
|
||||
- name: limit usage
|
||||
gluster_volume: state=present name=test1 directory=/foo quota=20.0MB
|
||||
|
||||
- name: stop gluster volume
|
||||
gluster_volume: state=stopped name=test1
|
||||
|
||||
- name: remove gluster volume
|
||||
gluster_volume: state=absent name=test1
|
||||
"""
|
||||
|
||||
import shutil
|
||||
import time
|
||||
import socket
|
||||
|
||||
def main():
|
||||
|
||||
|
||||
def run_gluster(gargs, **kwargs):
|
||||
args = [glusterbin]
|
||||
args.extend(gargs)
|
||||
rc, out, err = module.run_command(args, **kwargs)
|
||||
if rc != 0:
|
||||
module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err))
|
||||
return out
|
||||
|
||||
def run_gluster_nofail(gargs, **kwargs):
|
||||
args = [glusterbin]
|
||||
args.extend(gargs)
|
||||
rc, out, err = module.run_command(args, **kwargs)
|
||||
if rc != 0:
|
||||
return None
|
||||
return out
|
||||
|
||||
def run_gluster_yes(gargs):
|
||||
args = [glusterbin]
|
||||
args.extend(gargs)
|
||||
rc, out, err = module.run_command(args, data='y\n')
|
||||
if rc != 0:
|
||||
module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err))
|
||||
return out
|
||||
|
||||
def get_peers():
|
||||
out = run_gluster([ 'peer', 'status'])
|
||||
i = 0
|
||||
peers = {}
|
||||
hostname = None
|
||||
uuid = None
|
||||
state = None
|
||||
for row in out.split('\n'):
|
||||
if ': ' in row:
|
||||
key, value = row.split(': ')
|
||||
if key.lower() == 'hostname':
|
||||
hostname = value
|
||||
if key.lower() == 'uuid':
|
||||
uuid = value
|
||||
if key.lower() == 'state':
|
||||
state = value
|
||||
peers[hostname] = [ uuid, state ]
|
||||
return peers
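For context, a simplified standalone sketch of the 'gluster peer status' parsing get_peers() performs; the sample output is invented and abbreviated.

sample = """Number of Peers: 1

Hostname: gluster2.example.com
Uuid: 6a9d2b3c-1111-2222-3333-444455556666
State: Peer in Cluster (Connected)"""

peers = {}
hostname = uuid = state = None
for row in sample.split('\n'):
    if ': ' in row:
        key, value = row.split(': ', 1)
        if key.lower() == 'hostname':
            hostname = value
        if key.lower() == 'uuid':
            uuid = value
        if key.lower() == 'state':
            state = value
            peers[hostname] = [uuid, state]
print(peers)   # {'gluster2.example.com': ['6a9d2b3c-...', 'Peer in Cluster (Connected)']}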
|
||||
|
||||
def get_volumes():
|
||||
out = run_gluster([ 'volume', 'info' ])
|
||||
|
||||
volumes = {}
|
||||
volume = {}
|
||||
for row in out.split('\n'):
|
||||
if ': ' in row:
|
||||
key, value = row.split(': ')
|
||||
if key.lower() == 'volume name':
|
||||
volume['name'] = value
|
||||
volume['options'] = {}
|
||||
volume['quota'] = False
|
||||
if key.lower() == 'volume id':
|
||||
volume['id'] = value
|
||||
if key.lower() == 'status':
|
||||
volume['status'] = value
|
||||
if key.lower() == 'transport-type':
|
||||
volume['transport'] = value
|
||||
if key.lower() != 'bricks' and key.lower()[:5] == 'brick':
|
||||
if not 'bricks' in volume:
|
||||
volume['bricks'] = []
|
||||
volume['bricks'].append(value)
|
||||
# Volume options
|
||||
if '.' in key:
|
||||
if not 'options' in volume:
|
||||
volume['options'] = {}
|
||||
volume['options'][key] = value
|
||||
if key == 'features.quota' and value == 'on':
|
||||
volume['quota'] = True
|
||||
else:
|
||||
if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:':
|
||||
if len(volume) > 0:
|
||||
volumes[volume['name']] = volume
|
||||
volume = {}
|
||||
return volumes
|
||||
|
||||
def get_quotas(name, nofail):
|
||||
quotas = {}
|
||||
if nofail:
|
||||
out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ])
|
||||
if not out:
|
||||
return quotas
|
||||
else:
|
||||
out = run_gluster([ 'volume', 'quota', name, 'list' ])
|
||||
for row in out.split('\n'):
|
||||
if row[:1] == '/':
|
||||
q = re.split('\s+', row)
|
||||
quotas[q[0]] = q[1]
|
||||
return quotas
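And a similar sketch of the quota-list parsing above: only rows starting with '/' are considered, and the path plus hard-limit columns are kept. The sample output is invented.

import re

sample = """                  Path                   Hard-limit Soft-limit   Used  Available
--------------------------------------------------------------------------------
/foo                                      20.0MB       80%      0Bytes  20.0MB"""

quotas = {}
for row in sample.split('\n'):
    if row[:1] == '/':
        q = re.split(r'\s+', row)
        quotas[q[0]] = q[1]
print(quotas)   # {'/foo': '20.0MB'}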
|
||||
|
||||
def wait_for_peer(host):
|
||||
for x in range(0, 4):
|
||||
peers = get_peers()
|
||||
if host in peers and peers[host][1].lower().find('peer in cluster') != -1:
|
||||
return True
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def probe(host):
|
||||
run_gluster([ 'peer', 'probe', host ])
|
||||
if not wait_for_peer(host):
|
||||
module.fail_json(msg='failed to probe peer %s' % host)
|
||||
changed = True
|
||||
|
||||
def probe_all_peers(hosts, peers, myhostname):
|
||||
for host in hosts:
|
||||
if host not in peers:
|
||||
# dont probe ourselves
|
||||
if myhostname != host:
|
||||
probe(host)
|
||||
|
||||
def create_volume(name, stripe, replica, transport, hosts, brick):
|
||||
args = [ 'volume', 'create' ]
|
||||
args.append(name)
|
||||
if stripe:
|
||||
args.append('stripe')
|
||||
args.append(str(stripe))
|
||||
if replica:
|
||||
args.append('replica')
|
||||
args.append(str(replica))
|
||||
args.append('transport')
|
||||
args.append(transport)
|
||||
for host in hosts:
|
||||
args.append(('%s:%s' % (host, brick)))
|
||||
run_gluster(args)
|
||||
|
||||
def start_volume(name):
|
||||
run_gluster([ 'volume', 'start', name ])
|
||||
|
||||
def stop_volume(name):
|
||||
run_gluster_yes([ 'volume', 'stop', name ])
|
||||
|
||||
def set_volume_option(name, option, parameter):
|
||||
run_gluster([ 'volume', 'set', name, option, parameter ])
|
||||
|
||||
def add_brick(name, brick):
|
||||
run_gluster([ 'volume', 'add-brick', name, brick ])
|
||||
|
||||
def rebalance(name):
|
||||
run_gluster(['volume', 'rebalance', name, 'start'])
|
||||
|
||||
def enable_quota(name):
|
||||
run_gluster([ 'volume', 'quota', name, 'enable' ])
|
||||
|
||||
def set_quota(name, directory, value):
|
||||
run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ])
|
||||
|
||||
|
||||
### MAIN ###
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(required=True, default=None, aliases=['volume']),
|
||||
state=dict(required=True, choices=[ 'present', 'absent', 'started', 'stopped', 'rebalanced' ]),
|
||||
cluster=dict(required=False, default=None, type='list'),
|
||||
host=dict(required=False, default=None),
|
||||
stripes=dict(required=False, default=None, type='int'),
|
||||
replicas=dict(required=False, default=None, type='int'),
|
||||
transport=dict(required=False, default='tcp', choices=[ 'tcp', 'rdma', 'tcp,rdma' ]),
|
||||
brick=dict(required=False, default=None),
|
||||
start_on_create=dict(required=False, default=True, type='bool'),
|
||||
rebalance=dict(required=False, default=False, type='bool'),
|
||||
options=dict(required=False, default=None, type='dict'),
|
||||
quota=dict(required=False),
|
||||
directory=dict(required=False, default=None),
|
||||
)
|
||||
)
|
||||
|
||||
glusterbin = module.get_bin_path('gluster', True)
|
||||
|
||||
changed = False
|
||||
|
||||
action = module.params['state']
|
||||
volume_name = module.params['name']
|
||||
cluster= module.params['cluster']
|
||||
brick_path = module.params['brick']
|
||||
stripes = module.params['stripes']
|
||||
replicas = module.params['replicas']
|
||||
transport = module.params['transport']
|
||||
myhostname = module.params['host']
|
||||
start_on_create = module.boolean(module.params['start_on_create'])  # renamed so it does not shadow the start_volume() helper above
|
||||
do_rebalance = module.boolean(module.params['rebalance'])  # renamed so it does not shadow the rebalance() helper above
|
||||
|
||||
if not myhostname:
|
||||
myhostname = socket.gethostname()
|
||||
|
||||
options = module.params['options']
|
||||
quota = module.params['quota']
|
||||
directory = module.params['directory']
|
||||
|
||||
|
||||
# get current state info
|
||||
peers = get_peers()
|
||||
volumes = get_volumes()
|
||||
quotas = {}
|
||||
if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
|
||||
quotas = get_quotas(volume_name, True)
|
||||
|
||||
# do the work!
|
||||
if action == 'absent':
|
||||
if volume_name in volumes:
|
||||
run_gluster([ 'volume', 'delete', volume_name ])
|
||||
changed = True
|
||||
|
||||
if action == 'present':
|
||||
probe_all_peers(cluster, peers, myhostname)
|
||||
|
||||
# create if it doesn't exist
|
||||
if volume_name not in volumes:
|
||||
create_volume(volume_name, stripes, replicas, transport, cluster, brick_path)
|
||||
changed = True
|
||||
|
||||
if volume_name in volumes:
|
||||
if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
|
||||
start_volume(volume_name)
|
||||
changed = True
|
||||
|
||||
# switch bricks
|
||||
new_bricks = []
|
||||
removed_bricks = []
|
||||
all_bricks = []
|
||||
for node in cluster:
|
||||
brick = '%s:%s' % (node, brick_path)
|
||||
all_bricks.append(brick)
|
||||
if brick not in volumes[volume_name]['bricks']:
|
||||
new_bricks.append(brick)
|
||||
|
||||
# this module does not yet remove bricks, but we check those anyways
|
||||
for brick in volumes[volume_name]['bricks']:
|
||||
if brick not in all_bricks:
|
||||
removed_bricks.append(brick)
|
||||
|
||||
for brick in new_bricks:
|
||||
add_brick(volume_name, brick)
|
||||
changed = True
|
||||
|
||||
# handle quotas
|
||||
if quota:
|
||||
if not volumes[volume_name]['quota']:
|
||||
enable_quota(volume_name)
|
||||
quotas = get_quotas(volume_name, False)
|
||||
if directory not in quotas or quotas[directory] != quota:
|
||||
set_quota(volume_name, directory, quota)
|
||||
changed = True
|
||||
|
||||
# set options
|
||||
for option in options.keys():
|
||||
if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
|
||||
set_volume_option(volume_name, option, options[option])
|
||||
changed = True
|
||||
|
||||
else:
|
||||
module.fail_json(msg='failed to create volume %s' % volume_name)
|
||||
|
||||
if volume_name not in volumes:
|
||||
module.fail_json(msg='volume not found %s' % volume_name)
|
||||
|
||||
if action == 'started':
|
||||
if volumes[volume_name]['status'].lower() != 'started':
|
||||
start_volume(volume_name)
|
||||
changed = True
|
||||
|
||||
if action == 'stopped':
|
||||
if volumes[volume_name]['status'].lower() != 'stopped':
|
||||
stop_volume(volume_name)
|
||||
changed = True
|
||||
|
||||
if changed:
|
||||
volumes = get_volumes()
|
||||
if do_rebalance:
|
||||
rebalance(volume_name)
|
||||
|
||||
facts = {}
|
||||
facts['glusterfs'] = { 'peers': peers, 'volumes': volumes, 'quotas': quotas }
|
||||
|
||||
module.exit_json(changed=changed, ansible_facts=facts)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
0
windows/__init__.py
Normal file
248
windows/win_chocolatey.ps1
Normal file
|
@ -0,0 +1,248 @@
|
|||
#!powershell
# This file is part of Ansible
#
# Copyright 2014, Trond Hindenes <trond@hindenes.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# WANT_JSON
# POWERSHELL_COMMON

function Write-Log
{
    param
    (
        [parameter(mandatory=$false)]
        [System.String]
        $message
    )

    $date = get-date -format 'yyyy-MM-dd hh:mm:ss.zz'

    Write-Host "$date | $message"

    Out-File -InputObject "$date $message" -FilePath $global:LoggingFile -Append
}

$params = Parse-Args $args;
$result = New-Object PSObject;
Set-Attr $result "changed" $false;

If ($params.name)
{
    $package = $params.name
}
Else
{
    Fail-Json $result "missing required argument: name"
}

if(($params.logPath).length -gt 0)
{
    $global:LoggingFile = $params.logPath
}
else
{
    $global:LoggingFile = "c:\ansible-playbook.log"
}
If ($params.force)
{
    $force = $params.force | ConvertTo-Bool
}
Else
{
    $force = $false
}

If ($params.version)
{
    $version = $params.version
}
Else
{
    $version = $null
}

If ($params.showlog)
{
    $showlog = $params.showlog | ConvertTo-Bool
}
Else
{
    $showlog = $null
}

If ($params.state)
{
    $state = $params.state.ToString().ToLower()
    If (($state -ne "present") -and ($state -ne "absent"))
    {
        Fail-Json $result "state is $state; must be present or absent"
    }
}
Else
{
    $state = "present"
}

$ChocoAlreadyInstalled = get-command choco -ErrorAction 0
if ($ChocoAlreadyInstalled -eq $null)
{
    # We need to install chocolatey
    $install_choco_result = iex ((new-object net.webclient).DownloadString("https://chocolatey.org/install.ps1"))
    $result.changed = $true
    $executable = "C:\ProgramData\chocolatey\bin\choco.exe"
}
Else
{
    $executable = "choco.exe"
}

If ($params.source)
{
    $source = $params.source.ToString().ToLower()
    If (($source -ne "chocolatey") -and ($source -ne "webpi") -and ($source -ne "windowsfeatures") -and ($source -ne "ruby"))
    {
        Fail-Json $result "source is $source - must be one of chocolatey, ruby, webpi or windowsfeatures."
    }
}
Elseif (!$params.source)
{
    $source = "chocolatey"
}

if ($source -eq "webpi")
{
    # check whether 'webpi' installation source is available; if it isn't, install it
    $webpi_check_cmd = "$executable list webpicmd -localonly"
    $webpi_check_result = invoke-expression $webpi_check_cmd
    Set-Attr $result "chocolatey_bootstrap_webpi_check_cmd" $webpi_check_cmd
    Set-Attr $result "chocolatey_bootstrap_webpi_check_log" $webpi_check_result
    if (
        (
            ($webpi_check_result.GetType().Name -eq "String") -and
            ($webpi_check_result -match "No packages found")
        ) -or
        ($webpi_check_result -contains "No packages found.")
    )
    {
        # lessmsi is a webpicmd dependency, but dependency resolution fails unless it's installed separately
        $lessmsi_install_cmd = "$executable install lessmsi"
        $lessmsi_install_result = invoke-expression $lessmsi_install_cmd
        Set-Attr $result "chocolatey_bootstrap_lessmsi_install_cmd" $lessmsi_install_cmd
        Set-Attr $result "chocolatey_bootstrap_lessmsi_install_log" $lessmsi_install_result

        $webpi_install_cmd = "$executable install webpicmd"
        $webpi_install_result = invoke-expression $webpi_install_cmd
        Set-Attr $result "chocolatey_bootstrap_webpi_install_cmd" $webpi_install_cmd
        Set-Attr $result "chocolatey_bootstrap_webpi_install_log" $webpi_install_result

        if (($webpi_install_result | select-string "already installed").length -gt 0)
        {
            # no change
        }
        elseif (($webpi_install_result | select-string "webpicmd has finished successfully").length -gt 0)
        {
            $result.changed = $true
        }
        Else
        {
            Fail-Json $result "WebPI install error: $webpi_install_result"
        }
    }
}
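# Build the choco.exe command line from the validated parameters (state, force, version and source)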
$expression = $executable
if ($state -eq "present")
{
    $expression += " install $package"
}
Elseif ($state -eq "absent")
{
    $expression += " uninstall $package"
}
if ($force)
{
    if ($state -eq "present")
    {
        $expression += " -force"
    }
}
if ($version)
{
    $expression += " -version $version"
}
if ($source -eq "chocolatey")
{
    $expression += " -source https://chocolatey.org/api/v2/"
}
elseif (($source -eq "windowsfeatures") -or ($source -eq "webpi") -or ($source -eq "ruby"))
{
    $expression += " -source $source"
}

Set-Attr $result "chocolatey command" $expression
$op_result = invoke-expression $expression
if ($state -eq "present")
{
    if (
        (($op_result | select-string "already installed").length -gt 0) -or
        # webpi has different text output, and that doesn't include the package name but instead the human-friendly name
        (($op_result | select-string "No products to be installed").length -gt 0)
    )
    {
        # no change
    }
    elseif (
        (($op_result | select-string "has finished successfully").length -gt 0) -or
        # webpi has different text output, and that doesn't include the package name but instead the human-friendly name
        (($op_result | select-string "Install of Products: SUCCESS").length -gt 0) -or
        (($op_result | select-string "gem installed").length -gt 0) -or
        (($op_result | select-string "gems installed").length -gt 0)
    )
    {
        $result.changed = $true
    }
    Else
    {
        Fail-Json $result "Install error: $op_result"
    }
}
Elseif ($state -eq "absent")
{
    $op_result = invoke-expression "$executable uninstall $package"
    # HACK: Misleading - 'Uninstalling from folder' appears in output even when package is not installed, hence order of checks this way
    if (
        (($op_result | select-string "not installed").length -gt 0) -or
        (($op_result | select-string "Cannot find path").length -gt 0)
    )
    {
        # no change
    }
    elseif (($op_result | select-string "Uninstalling from folder").length -gt 0)
    {
        $result.changed = $true
    }
    else
    {
        Fail-Json $result "Uninstall error: $op_result"
    }
}

if ($showlog)
{
    Set-Attr $result "chocolatey_log" $op_result
}
Set-Attr $result "chocolatey_success" "true"

Exit-Json $result;
118
windows/win_chocolatey.py
Normal file
@@ -0,0 +1,118 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2014, Trond Hindenes <trond@hindenes.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name

DOCUMENTATION = '''
---
module: win_chocolatey
version_added: "1.9"
short_description: Installs packages using Chocolatey
description:
  - Installs packages using Chocolatey (http://chocolatey.org/). If Chocolatey is missing from the system, the module will install it. The list of available packages can be found at http://chocolatey.org/packages
options:
  name:
    description:
      - Name of the package to be installed
    required: true
    default: null
    aliases: []
  state:
    description:
      - State of the package on the system
    required: false
    choices:
      - present
      - absent
    default: present
    aliases: []
  force:
    description:
      - Forces install of the package (even if it already exists). Using force will cause Ansible to always report that a change was made
    required: false
    choices:
      - yes
      - no
    default: no
    aliases: []
  version:
    description:
      - Specific version of the package to be installed
      - Ignored when state == 'absent'
    required: false
    default: null
    aliases: []
  showlog:
    description:
      - Outputs the chocolatey log inside a chocolatey_log property.
    required: false
    choices:
      - yes
      - no
    default: no
    aliases: []
  source:
    description:
      - Which source to install from
    required: false
    choices:
      - chocolatey
      - ruby
      - webpi
      - windowsfeatures
    default: chocolatey
    aliases: []
  logPath:
    description:
      - Where to log command output to
    required: false
    default: c:\\ansible-playbook.log
    aliases: []
author: Trond Hindenes, Peter Mounce
'''

# TODO:
# * Better parsing when a package has dependencies - currently fails
# * Time each item that is run
# * Support 'changed' with gems - would require shelling out to `gem list` first and parsing, kinda defeating the point of using chocolatey.

EXAMPLES = '''
# Install git
win_chocolatey:
  name: git

# Install notepadplusplus version 6.6
win_chocolatey:
  name: notepadplusplus.install
  version: 6.6

# Uninstall git
win_chocolatey:
  name: git
  state: absent

# Install Application Request Routing v3 from webpi
# Logically, this requires that you install IIS first (see win_feature)
# To find a list of packages available via webpi source, `choco list -source webpi`
win_chocolatey:
  name: ARRv3
  source: webpi
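
# A further illustrative combination of the documented options: force a reinstall
# and capture the chocolatey log (the logPath value below is an arbitrary example path)
win_chocolatey:
  name: git
  force: yes
  showlog: yes
  logPath: c:\\ansible-chocolatey.log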
'''
86
windows/win_updates.ps1
Normal file
@@ -0,0 +1,86 @@
#!powershell
# This file is part of Ansible
#
# Copyright 2014, Trond Hindenes <trond@hindenes.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# WANT_JSON
# POWERSHELL_COMMON

function Write-Log
{
    param
    (
        [parameter(mandatory=$false)]
        [System.String]
        $message
    )

    $date = get-date -format 'yyyy-MM-dd hh:mm:ss.zz'

    Write-Host "$date $message"

    Out-File -InputObject "$date $message" -FilePath $global:LoggingFile -Append
}

$params = Parse-Args $args;
$result = New-Object PSObject;
Set-Attr $result "changed" $false;

if(($params.logPath).Length -gt 0) {
    $global:LoggingFile = $params.logPath
} else {
    $global:LoggingFile = "c:\ansible-playbook.log"
}
if ($params.category) {
    $category = $params.category
} else {
    $category = "critical"
}
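
# Record which KBs are already installed so the result can report the prior state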
$installed_prior = get-wulist -isinstalled | foreach { $_.KBArticleIDs }
set-attr $result "updates_already_present" $installed_prior

write-log "Looking for updates in '$category'"
set-attr $result "updates_category" $category
$to_install = get-wulist -category $category
$installed = @()
foreach ($u in $to_install) {
    $kb = $u.KBArticleIDs
    write-log "Installing $kb - $($u.Title)"
    $install_result = get-wuinstall -KBArticleID $u.KBArticleIDs -acceptall -ignorereboot
    Set-Attr $result "updates_installed_KB$kb" $u.Title
    $installed += $kb
}
write-log "Installed: $($installed.count)"
set-attr $result "updates_installed" $installed
set-attr $result "updates_installed_count" $installed.count
$result.changed = $installed.count -gt 0

$installed_afterwards = get-wulist -isinstalled | foreach { $_.KBArticleIDs }
set-attr $result "updates_installed_afterwards" $installed_afterwards

$reboot_needed = Get-WURebootStatus
write-log $reboot_needed
if ($reboot_needed -match "not") {
    write-log "Reboot not required"
} else {
    write-log "Reboot required"
    Set-Attr $result "updates_reboot_needed" $true
    $result.changed = $true
}

Set-Attr $result "updates_success" "true"
Exit-Json $result;
51
windows/win_updates.py
Normal file
@@ -0,0 +1,51 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2014, Peter Mounce <public@neverrunwithscissors.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name

DOCUMENTATION = '''
---
module: win_updates
version_added: "1.9"
short_description: Lists / installs Windows updates
description:
  - Installs Windows updates using PSWindowsUpdate (http://gallery.technet.microsoft.com/scriptcenter/2d191bcd-3308-4edd-9de2-88dff796b0bc).
  - PSWindowsUpdate needs to be installed first - use win_chocolatey.
options:
  category:
    description:
      - Which category to install updates from
    required: false
    default: critical
    choices:
      - critical
      - security
      - (anything that is a valid update category)
    aliases: []
author: Peter Mounce
'''

EXAMPLES = '''
# Install updates from the security category
win_updates:
  category: security
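
# PSWindowsUpdate has to be present before this module can run; a sketch of
# bootstrapping it with win_chocolatey and then pulling critical updates
# (the chocolatey package name PSWindowsUpdate is assumed here)
win_chocolatey:
  name: PSWindowsUpdate

win_updates:
  category: critical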
'''