Merge branch 'devel' of https://github.com/ansible/ansible-modules-extras into fix-lvol-lvcreate-prompt
commit f6faf53702
155 changed files with 4230 additions and 790 deletions
|
@ -1,10 +1,15 @@
|
|||
sudo: false
|
||||
language: python
|
||||
python:
|
||||
- "2.7"
|
||||
addons:
|
||||
apt:
|
||||
sources:
|
||||
- deadsnakes
|
||||
packages:
|
||||
- python2.4
|
||||
- python2.6
|
||||
script:
|
||||
- python2.4 -m compileall -fq -x 'cloud/' .
|
||||
- python2.4 -m compileall -fq -x 'cloud/|monitoring/zabbix.*\.py|/layman\.py|/maven_artifact\.py|clustering/consul.*\.py|notification/pushbullet\.py' .
|
||||
- python2.6 -m compileall -fq .
|
||||
- python2.7 -m compileall -fq .
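For readers unfamiliar with the CI step above: the -x argument is a regular expression matched against file paths to skip. A minimal local sketch of the same check using the standard-library compileall module (the pattern is copied from the python2.4 line; running it under a single modern interpreter is an assumption, not what Travis does):

import compileall
import re

# Skip the paths the .travis.yml above excludes for the python2.4 run,
# then byte-compile everything else, forcing recompilation and staying quiet.
skip = re.compile(r"cloud/|monitoring/zabbix.*\.py|/layman\.py|/maven_artifact\.py"
                  r"|clustering/consul.*\.py|notification/pushbullet\.py")
ok = compileall.compile_dir('.', rx=skip, force=True, quiet=True)
print('compileall succeeded' if ok else 'compileall found errors')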
|
||||
|
|
6
cloud/amazon/cloudtrail.py
Executable file → Normal file
|
@ -19,9 +19,11 @@ DOCUMENTATION = """
|
|||
module: cloudtrail
|
||||
short_description: manage CloudTrail creation and deletion
|
||||
description:
|
||||
- Creates or deletes CloudTrail configuration. Ensures logging is also enabled. This module has a dependency on python-boto >= 2.21.
|
||||
- Creates or deletes CloudTrail configuration. Ensures logging is also enabled.
|
||||
version_added: "2.0"
|
||||
author: Ted Timmons
|
||||
author: "Ted Timmons (@tedder)"
|
||||
requirements:
|
||||
- "boto >= 2.21"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
|
|
91
cloud/amazon/ec2_win_password.py
Normal file
|
@ -0,0 +1,91 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_win_password
|
||||
short_description: gets the default administrator password for ec2 windows instances
|
||||
description:
|
||||
- Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module has a dependency on python-boto.
|
||||
version_added: "2.0"
|
||||
author: Rick Mendes
|
||||
options:
|
||||
instance_id:
|
||||
description:
|
||||
- The instance id to get the password data from.
|
||||
required: true
|
||||
key_file:
|
||||
description:
|
||||
- path to the file containing the key pair used on the instance
|
||||
required: true
|
||||
region:
|
||||
description:
|
||||
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ 'aws_region', 'ec2_region' ]
|
||||
|
||||
extends_documentation_fragment: aws
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Example of getting a password
|
||||
tasks:
|
||||
- name: get the Administrator password
|
||||
ec2_win_password:
|
||||
profile: my-boto-profile
|
||||
instance_id: i-XXXXXX
|
||||
region: us-east-1
|
||||
key_file: "~/aws-creds/my_test_key.pem"
|
||||
'''
|
||||
|
||||
from base64 import b64decode
|
||||
from os.path import expanduser
|
||||
from Crypto.Cipher import PKCS1_v1_5
|
||||
from Crypto.PublicKey import RSA
|
||||
|
||||
try:
|
||||
import boto.ec2
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
instance_id = dict(required=True),
|
||||
key_file = dict(required=True),
|
||||
)
|
||||
)
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
if not HAS_BOTO:
|
||||
module.fail_json(msg='Boto required for this module.')
|
||||
|
||||
instance_id = module.params.get('instance_id')
|
||||
key_file = expanduser(module.params.get('key_file'))
|
||||
|
||||
ec2 = ec2_connect(module)
|
||||
|
||||
data = ec2.get_password_data(instance_id)
|
||||
decoded = b64decode(data)
|
||||
|
||||
f = open(key_file, 'r')
|
||||
key = RSA.importKey(f.read())
|
||||
cipher = PKCS1_v1_5.new(key)
|
||||
sentinel = 'password decryption failed!!!'
|
||||
|
||||
try:
|
||||
decrypted = cipher.decrypt(decoded, sentinel)
|
||||
except ValueError as e:
|
||||
decrypted = None
|
||||
|
||||
if decrypted is None:
|
||||
module.exit_json(win_password='', changed=False)
|
||||
else:
|
||||
module.exit_json(win_password=decrypted, changed=True)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
main()
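The core of the new module above is the password decryption: the base64 blob returned by get_password_data() is decrypted with the private key of the key pair the instance was launched with. A minimal sketch of just that step, using the same PyCrypto calls (the function name, file path and variable names are illustrative):

from base64 import b64decode
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA

def decrypt_password_data(password_data, key_path):
    # password_data: base64-encoded blob from ec2.get_password_data(instance_id)
    with open(key_path) as f:
        cipher = PKCS1_v1_5.new(RSA.importKey(f.read()))
    # The second argument is a sentinel returned on failure instead of raising.
    return cipher.decrypt(b64decode(password_data), None)

# Hypothetical usage (expand '~' first, as the module does with expanduser):
# password = decrypt_password_data(data, '/home/me/aws-creds/my_test_key.pem')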
|
410
cloud/cloudstack/cs_account.py
Normal file
|
@ -0,0 +1,410 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# (c) 2015, René Moser <mail@renemoser.net>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: cs_account
|
||||
short_description: Manages account on Apache CloudStack based clouds.
|
||||
description:
|
||||
- Create, disable, lock, enable and remove accounts.
|
||||
version_added: '2.0'
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of account.
|
||||
required: true
|
||||
username:
|
||||
description:
|
||||
- Username of the user to be created if account did not exist.
|
||||
- Required on C(state=present).
|
||||
required: false
|
||||
default: null
|
||||
password:
|
||||
description:
|
||||
- Password of the user to be created if account did not exist.
|
||||
- Required on C(state=present).
|
||||
required: false
|
||||
default: null
|
||||
first_name:
|
||||
description:
|
||||
- First name of the user to be created if account did not exist.
|
||||
- Required on C(state=present).
|
||||
required: false
|
||||
default: null
|
||||
last_name:
|
||||
description:
|
||||
- Last name of the user to be created if account did not exist.
|
||||
- Required on C(state=present).
|
||||
required: false
|
||||
default: null
|
||||
email:
|
||||
description:
|
||||
- Email of the user to be created if account did not exist.
|
||||
- Required on C(state=present).
|
||||
required: false
|
||||
default: null
|
||||
timezone:
|
||||
description:
|
||||
- Timezone of the user to be created if account did not exist.
|
||||
required: false
|
||||
default: null
|
||||
network_domain:
|
||||
description:
|
||||
- Network domain of the account.
|
||||
required: false
|
||||
default: null
|
||||
account_type:
|
||||
description:
|
||||
- Type of the account.
|
||||
required: false
|
||||
default: 'user'
|
||||
choices: [ 'user', 'root_admin', 'domain_admin' ]
|
||||
domain:
|
||||
description:
|
||||
- Domain the account is related to.
|
||||
required: false
|
||||
default: 'ROOT'
|
||||
state:
|
||||
description:
|
||||
- State of the account.
|
||||
required: false
|
||||
default: 'present'
|
||||
choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked' ]
|
||||
poll_async:
|
||||
description:
|
||||
- Poll async jobs until job has finished.
|
||||
required: false
|
||||
default: true
|
||||
extends_documentation_fragment: cloudstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# create an account in domain 'CUSTOMERS'
|
||||
local_action:
|
||||
module: cs_account
|
||||
name: customer_xy
|
||||
username: customer_xy
|
||||
password: S3Cur3
|
||||
last_name: Doe
|
||||
first_name: John
|
||||
email: john.doe@example.com
|
||||
domain: CUSTOMERS
|
||||
|
||||
|
||||
# Lock an existing account in domain 'CUSTOMERS'
|
||||
local_action:
|
||||
module: cs_account
|
||||
name: customer_xy
|
||||
domain: CUSTOMERS
|
||||
state: locked
|
||||
|
||||
|
||||
# Disable an existing account in domain 'CUSTOMERS'
|
||||
local_action:
|
||||
module: cs_account
|
||||
name: customer_xy
|
||||
domain: CUSTOMERS
|
||||
state: disabled
|
||||
|
||||
|
||||
# Enable an existing account in domain 'CUSTOMERS'
|
||||
local_action:
|
||||
module: cs_account
|
||||
name: customer_xy
|
||||
domain: CUSTOMERS
|
||||
state: enabled
|
||||
|
||||
|
||||
# Remove an account in domain 'CUSTOMERS'
|
||||
local_action:
|
||||
module: cs_account
|
||||
name: customer_xy
|
||||
domain: CUSTOMERS
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
name:
|
||||
description: Name of the account.
|
||||
returned: success
|
||||
type: string
|
||||
sample: linus@example.com
|
||||
account_type:
|
||||
description: Type of the account.
|
||||
returned: success
|
||||
type: string
|
||||
sample: user
|
||||
account_state:
|
||||
description: State of the account.
|
||||
returned: success
|
||||
type: string
|
||||
sample: enabled
|
||||
network_domain:
|
||||
description: Network domain of the account.
|
||||
returned: success
|
||||
type: string
|
||||
sample: example.local
|
||||
domain:
|
||||
description: Domain the account is related to.
|
||||
returned: success
|
||||
type: string
|
||||
sample: ROOT
|
||||
'''
|
||||
|
||||
try:
|
||||
from cs import CloudStack, CloudStackException, read_config
|
||||
has_lib_cs = True
|
||||
except ImportError:
|
||||
has_lib_cs = False
|
||||
|
||||
# import cloudstack common
|
||||
from ansible.module_utils.cloudstack import *
|
||||
|
||||
|
||||
class AnsibleCloudStackAccount(AnsibleCloudStack):
|
||||
|
||||
def __init__(self, module):
|
||||
AnsibleCloudStack.__init__(self, module)
|
||||
self.account = None
|
||||
self.account_types = {
|
||||
'user': 0,
|
||||
'root_admin': 1,
|
||||
'domain_admin': 2,
|
||||
}
|
||||
|
||||
|
||||
def get_account_type(self):
|
||||
account_type = self.module.params.get('account_type')
|
||||
return self.account_types[account_type]
|
||||
|
||||
|
||||
def get_account(self):
|
||||
if not self.account:
|
||||
args = {}
|
||||
args['listall'] = True
|
||||
args['domainid'] = self.get_domain('id')
|
||||
accounts = self.cs.listAccounts(**args)
|
||||
if accounts:
|
||||
account_name = self.module.params.get('name')
|
||||
for a in accounts['account']:
|
||||
if account_name in [ a['name'] ]:
|
||||
self.account = a
|
||||
break
|
||||
|
||||
return self.account
|
||||
|
||||
|
||||
def enable_account(self):
|
||||
account = self.get_account()
|
||||
if not account:
|
||||
self.module.fail_json(msg="Failed: account not present")
|
||||
|
||||
if account['state'].lower() != 'enabled':
|
||||
self.result['changed'] = True
|
||||
args = {}
|
||||
args['id'] = account['id']
|
||||
args['account'] = self.module.params.get('name')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.enableAccount(**args)
|
||||
if 'errortext' in res:
|
||||
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
|
||||
account = res['account']
|
||||
return account
|
||||
|
||||
|
||||
def lock_account(self):
|
||||
return self.lock_or_disable_account(lock=True)
|
||||
|
||||
|
||||
def disable_account(self):
|
||||
return self.lock_or_disable_account()
|
||||
|
||||
|
||||
def lock_or_disable_account(self, lock=False):
|
||||
account = self.get_account()
|
||||
if not account:
|
||||
self.module.fail_json(msg="Failed: account not present")
|
||||
|
||||
# we need to enable the account to lock it.
|
||||
if lock and account['state'].lower() == 'disabled':
|
||||
account = self.enable_account()
|
||||
|
||||
if lock and account['state'].lower() != 'locked' \
|
||||
or not lock and account['state'].lower() != 'disabled':
|
||||
self.result['changed'] = True
|
||||
args = {}
|
||||
args['id'] = account['id']
|
||||
args['account'] = self.module.params.get('name')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['lock'] = lock
|
||||
if not self.module.check_mode:
|
||||
account = self.cs.disableAccount(**args)
|
||||
|
||||
if 'errortext' in account:
|
||||
self.module.fail_json(msg="Failed: '%s'" % account['errortext'])
|
||||
|
||||
poll_async = self.module.params.get('poll_async')
|
||||
if poll_async:
|
||||
account = self._poll_job(account, 'account')
|
||||
return account
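The combined condition above decides whether a change is needed at all; written out as a small stand-alone helper (the state strings mirror the comparisons in the module, everything else is a sketch):

def needs_change(current_state, lock):
    # Locking is only a change if the account is not already locked;
    # disabling is only a change if it is not already disabled.
    current_state = current_state.lower()
    if lock:
        return current_state != 'locked'
    return current_state != 'disabled'

assert needs_change('enabled', lock=True)
assert not needs_change('locked', lock=True)
assert not needs_change('disabled', lock=False)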
|
||||
|
||||
|
||||
def present_account(self):
|
||||
missing_params = []
|
||||
|
||||
if not self.module.params.get('email'):
|
||||
missing_params.append('email')
|
||||
|
||||
if not self.module.params.get('username'):
|
||||
missing_params.append('username')
|
||||
|
||||
if not self.module.params.get('password'):
|
||||
missing_params.append('password')
|
||||
|
||||
if not self.module.params.get('first_name'):
|
||||
missing_params.append('first_name')
|
||||
|
||||
if not self.module.params.get('last_name'):
|
||||
missing_params.append('last_name')
|
||||
|
||||
if missing_params:
|
||||
self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
|
||||
|
||||
account = self.get_account()
|
||||
|
||||
if not account:
|
||||
self.result['changed'] = True
|
||||
|
||||
args = {}
|
||||
args['account'] = self.module.params.get('name')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['accounttype'] = self.get_account_type()
|
||||
args['networkdomain'] = self.module.params.get('network_domain')
|
||||
args['username'] = self.module.params.get('username')
|
||||
args['password'] = self.module.params.get('password')
|
||||
args['firstname'] = self.module.params.get('first_name')
|
||||
args['lastname'] = self.module.params.get('last_name')
|
||||
args['email'] = self.module.params.get('email')
|
||||
args['timezone'] = self.module.params.get('timezone')
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.createAccount(**args)
|
||||
if 'errortext' in res:
|
||||
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
|
||||
account = res['account']
|
||||
return account
|
||||
|
||||
|
||||
def absent_account(self):
|
||||
account = self.get_account()
|
||||
if account:
|
||||
self.result['changed'] = True
|
||||
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.deleteAccount(id=account['id'])
|
||||
|
||||
if 'errortext' in account:
|
||||
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
|
||||
|
||||
poll_async = self.module.params.get('poll_async')
|
||||
if poll_async:
|
||||
res = self._poll_job(res, 'account')
|
||||
return account
|
||||
|
||||
|
||||
def get_result(self, account):
|
||||
if account:
|
||||
if 'name' in account:
|
||||
self.result['name'] = account['name']
|
||||
if 'accounttype' in account:
|
||||
for key,value in self.account_types.items():
|
||||
if value == account['accounttype']:
|
||||
self.result['account_type'] = key
|
||||
break
|
||||
if 'state' in account:
|
||||
self.result['account_state'] = account['state']
|
||||
if 'domain' in account:
|
||||
self.result['domain'] = account['domain']
|
||||
if 'networkdomain' in account:
|
||||
self.result['network_domain'] = account['networkdomain']
|
||||
return self.result
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
name = dict(required=True),
|
||||
state = dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked' ], default='present'),
|
||||
account_type = dict(choices=['user', 'root_admin', 'domain_admin'], default='user'),
|
||||
network_domain = dict(default=None),
|
||||
domain = dict(default='ROOT'),
|
||||
email = dict(default=None),
|
||||
first_name = dict(default=None),
|
||||
last_name = dict(default=None),
|
||||
username = dict(default=None),
|
||||
password = dict(default=None),
|
||||
timezone = dict(default=None),
|
||||
poll_async = dict(choices=BOOLEANS, default=True),
|
||||
api_key = dict(default=None),
|
||||
api_secret = dict(default=None, no_log=True),
|
||||
api_url = dict(default=None),
|
||||
api_http_method = dict(default='get'),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
if not has_lib_cs:
|
||||
module.fail_json(msg="python library cs required: pip install cs")
|
||||
|
||||
try:
|
||||
acs_acc = AnsibleCloudStackAccount(module)
|
||||
|
||||
state = module.params.get('state')
|
||||
|
||||
if state in ['absent']:
|
||||
account = acs_acc.absent_account()
|
||||
|
||||
elif state in ['enabled']:
|
||||
account = acs_acc.enable_account()
|
||||
|
||||
elif state in ['disabled']:
|
||||
account = acs_acc.disable_account()
|
||||
|
||||
elif state in ['locked']:
|
||||
account = acs_acc.lock_account()
|
||||
|
||||
else:
|
||||
account = acs_acc.present_account()
|
||||
|
||||
result = acs_acc.get_result(account)
|
||||
|
||||
except CloudStackException, e:
|
||||
module.fail_json(msg='CloudStackException: %s' % str(e))
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Exception: %s' % str(e))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
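For reference, the account_type option maps to the integer codes CloudStack expects; a minimal sketch of the forward and reverse lookups the class above performs in get_account_type() and get_result() (the dict values are copied from the module, the helper names are made up):

ACCOUNT_TYPES = {'user': 0, 'root_admin': 1, 'domain_admin': 2}

def account_type_code(name):
    return ACCOUNT_TYPES[name]

def account_type_name(code):
    # Reverse lookup used when building the module's return values.
    for key, value in ACCOUNT_TYPES.items():
        if value == code:
            return key
    raise KeyError(code)

assert account_type_code('domain_admin') == 2
assert account_type_name(1) == 'root_admin'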
|
|
@ -25,7 +25,7 @@ short_description: Manages affinity groups on Apache CloudStack based clouds.
|
|||
description:
|
||||
- Create and remove affinity groups.
|
||||
version_added: '2.0'
|
||||
author: René Moser
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
@ -47,6 +47,16 @@ options:
|
|||
required: false
|
||||
default: 'present'
|
||||
choices: [ 'present', 'absent' ]
|
||||
domain:
|
||||
description:
|
||||
- Domain the affinity group is related to.
|
||||
required: false
|
||||
default: null
|
||||
account:
|
||||
description:
|
||||
- Account the affinity group is related to.
|
||||
required: false
|
||||
default: null
|
||||
poll_async:
|
||||
description:
|
||||
- Poll async jobs until job has finished.
|
||||
|
@ -56,7 +66,6 @@ extends_documentation_fragment: cloudstack
|
|||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
# Create an affinity group
|
||||
- local_action:
|
||||
module: cs_affinitygroup
|
||||
|
@ -104,20 +113,21 @@ class AnsibleCloudStackAffinityGroup(AnsibleCloudStack):
|
|||
|
||||
def __init__(self, module):
|
||||
AnsibleCloudStack.__init__(self, module)
|
||||
self.result = {
|
||||
'changed': False,
|
||||
}
|
||||
self.affinity_group = None
|
||||
|
||||
|
||||
def get_affinity_group(self):
|
||||
if not self.affinity_group:
|
||||
affinity_group_name = self.module.params.get('name')
|
||||
affinity_group = self.module.params.get('name')
|
||||
|
||||
affinity_groups = self.cs.listAffinityGroups()
|
||||
args = {}
|
||||
args['account'] = self.get_account('name')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
|
||||
affinity_groups = self.cs.listAffinityGroups(**args)
|
||||
if affinity_groups:
|
||||
for a in affinity_groups['affinitygroup']:
|
||||
if a['name'] == affinity_group_name:
|
||||
if affinity_group in [ a['name'], a['id'] ]:
|
||||
self.affinity_group = a
|
||||
break
|
||||
return self.affinity_group
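The change above lets the lookup match either the group name or its id within the account/domain scope; a minimal sketch of the matching logic against a listAffinityGroups-style response (the sample payload is made up):

def find_affinity_group(response, name_or_id):
    # Return the first group whose name or id matches, or None.
    for group in response.get('affinitygroup', []):
        if name_or_id in [group['name'], group['id']]:
            return group
    return None

sample = {'affinitygroup': [{'id': '8f1e-fake-uuid', 'name': 'web-ha'}]}
assert find_affinity_group(sample, 'web-ha')['id'] == '8f1e-fake-uuid'
assert find_affinity_group(sample, 'missing') is None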
|
||||
|
@ -142,10 +152,12 @@ class AnsibleCloudStackAffinityGroup(AnsibleCloudStack):
|
|||
if not affinity_group:
|
||||
self.result['changed'] = True
|
||||
|
||||
args = {}
|
||||
args['name'] = self.module.params.get('name')
|
||||
args['type'] = self.get_affinity_type()
|
||||
args = {}
|
||||
args['name'] = self.module.params.get('name')
|
||||
args['type'] = self.get_affinity_type()
|
||||
args['description'] = self.module.params.get('description')
|
||||
args['account'] = self.get_account('name')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.createAffinityGroup(**args)
|
||||
|
@ -156,7 +168,6 @@ class AnsibleCloudStackAffinityGroup(AnsibleCloudStack):
|
|||
poll_async = self.module.params.get('poll_async')
|
||||
if res and poll_async:
|
||||
affinity_group = self._poll_job(res, 'affinitygroup')
|
||||
|
||||
return affinity_group
|
||||
|
||||
|
||||
|
@ -165,8 +176,10 @@ class AnsibleCloudStackAffinityGroup(AnsibleCloudStack):
|
|||
if affinity_group:
|
||||
self.result['changed'] = True
|
||||
|
||||
args = {}
|
||||
args['name'] = self.module.params.get('name')
|
||||
args = {}
|
||||
args['name'] = self.module.params.get('name')
|
||||
args['account'] = self.get_account('name')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.deleteAffinityGroup(**args)
|
||||
|
@ -177,7 +190,6 @@ class AnsibleCloudStackAffinityGroup(AnsibleCloudStack):
|
|||
poll_async = self.module.params.get('poll_async')
|
||||
if res and poll_async:
|
||||
res = self._poll_job(res, 'affinitygroup')
|
||||
|
||||
return affinity_group
|
||||
|
||||
|
||||
|
@ -189,6 +201,10 @@ class AnsibleCloudStackAffinityGroup(AnsibleCloudStack):
|
|||
self.result['description'] = affinity_group['description']
|
||||
if 'type' in affinity_group:
|
||||
self.result['affinity_type'] = affinity_group['type']
|
||||
if 'domain' in affinity_group:
|
||||
self.result['domain'] = affinity_group['domain']
|
||||
if 'account' in affinity_group:
|
||||
self.result['account'] = affinity_group['account']
|
||||
return self.result
|
||||
|
||||
|
||||
|
@ -199,9 +215,11 @@ def main():
|
|||
affinty_type = dict(default=None),
|
||||
description = dict(default=None),
|
||||
state = dict(choices=['present', 'absent'], default='present'),
|
||||
domain = dict(default=None),
|
||||
account = dict(default=None),
|
||||
poll_async = dict(choices=BOOLEANS, default=True),
|
||||
api_key = dict(default=None),
|
||||
api_secret = dict(default=None),
|
||||
api_secret = dict(default=None, no_log=True),
|
||||
api_url = dict(default=None),
|
||||
api_http_method = dict(default='get'),
|
||||
),
|
||||
|
@ -225,6 +243,9 @@ def main():
|
|||
except CloudStackException, e:
|
||||
module.fail_json(msg='CloudStackException: %s' % str(e))
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Exception: %s' % str(e))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
# import module snippets
|
||||
|
|
|
@ -19,29 +19,45 @@
|
|||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: cs_firewall
|
||||
short_description: Manages firewall rules on Apache CloudStack based clouds.
|
||||
description:
|
||||
- Creates and removes firewall rules.
|
||||
version_added: '2.0'
|
||||
author: René Moser
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
options:
|
||||
ip_address:
|
||||
description:
|
||||
- Public IP address the rule is assigned to.
|
||||
required: true
|
||||
- Public IP address the ingress rule is assigned to.
|
||||
- Required if C(type=ingress).
|
||||
required: false
|
||||
default: null
|
||||
network:
|
||||
description:
|
||||
- Network the egress rule is related to.
|
||||
- Required if C(type=egress).
|
||||
required: false
|
||||
default: null
|
||||
state:
|
||||
description:
|
||||
- State of the firewall rule.
|
||||
required: false
|
||||
default: 'present'
|
||||
choices: [ 'present', 'absent' ]
|
||||
type:
|
||||
description:
|
||||
- Type of the firewall rule.
|
||||
required: false
|
||||
default: 'ingress'
|
||||
choices: [ 'ingress', 'egress' ]
|
||||
protocol:
|
||||
description:
|
||||
- Protocol of the firewall rule.
|
||||
- C(all) is only available if C(type=egress)
|
||||
required: false
|
||||
default: 'tcp'
|
||||
choices: [ 'tcp', 'udp', 'icmp' ]
|
||||
choices: [ 'tcp', 'udp', 'icmp', 'all' ]
|
||||
cidr:
|
||||
description:
|
||||
- CIDR (full notation) to be used for firewall rule.
|
||||
|
@ -52,9 +68,10 @@ options:
|
|||
- Start port for this rule. Considered if C(protocol=tcp) or C(protocol=udp).
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ 'port' ]
|
||||
end_port:
|
||||
description:
|
||||
- End port for this rule. Considered if C(protocol=tcp) or C(protocol=udp).
|
||||
- End port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). If not specified, equal to C(start_port).
|
||||
required: false
|
||||
default: null
|
||||
icmp_type:
|
||||
|
@ -67,22 +84,35 @@ options:
|
|||
- Error code for this icmp message. Considered if C(protocol=icmp).
|
||||
required: false
|
||||
default: null
|
||||
project:
|
||||
domain:
|
||||
description:
|
||||
- Name of the project.
|
||||
- Domain the firewall rule is related to.
|
||||
required: false
|
||||
default: null
|
||||
account:
|
||||
description:
|
||||
- Account the firewall rule is related to.
|
||||
required: false
|
||||
default: null
|
||||
project:
|
||||
description:
|
||||
- Name of the project the firewall rule is related to.
|
||||
required: false
|
||||
default: null
|
||||
poll_async:
|
||||
description:
|
||||
- Poll async jobs until job has finished.
|
||||
required: false
|
||||
default: true
|
||||
extends_documentation_fragment: cloudstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
# Allow inbound port 80/tcp from 1.2.3.4 to 4.3.2.1
|
||||
- local_action:
|
||||
module: cs_firewall
|
||||
ip_address: 4.3.2.1
|
||||
start_port: 80
|
||||
end_port: 80
|
||||
port: 80
|
||||
cidr: 1.2.3.4/32
|
||||
|
||||
|
||||
|
@ -90,8 +120,7 @@ EXAMPLES = '''
|
|||
- local_action:
|
||||
module: cs_firewall
|
||||
ip_address: 4.3.2.1
|
||||
start_port: 53
|
||||
end_port: 53
|
||||
port: 53
|
||||
protocol: '{{ item }}'
|
||||
with_items:
|
||||
- tcp
|
||||
|
@ -106,6 +135,72 @@ EXAMPLES = '''
|
|||
end_port: 8888
|
||||
cidr: 17.0.0.0/8
|
||||
state: absent
|
||||
|
||||
|
||||
# Allow all outbound traffic
|
||||
- local_action:
|
||||
module: cs_firewall
|
||||
network: my_network
|
||||
type: egress
|
||||
protocol: all
|
||||
|
||||
|
||||
# Allow only HTTP outbound traffic for an IP
|
||||
- local_action:
|
||||
module: cs_firewall
|
||||
network: my_network
|
||||
type: egress
|
||||
port: 80
|
||||
cidr: 10.101.1.20
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
ip_address:
|
||||
description: IP address of the rule if C(type=ingress)
|
||||
returned: success
|
||||
type: string
|
||||
sample: 10.100.212.10
|
||||
type:
|
||||
description: Type of the rule.
|
||||
returned: success
|
||||
type: string
|
||||
sample: ingress
|
||||
cidr:
|
||||
description: CIDR of the rule.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 0.0.0.0/0
|
||||
protocol:
|
||||
description: Protocol of the rule.
|
||||
returned: success
|
||||
type: string
|
||||
sample: tcp
|
||||
start_port:
|
||||
description: Start port of the rule.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 80
|
||||
end_port:
|
||||
description: End port of the rule.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 80
|
||||
icmp_code:
|
||||
description: ICMP code of the rule.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 1
|
||||
icmp_type:
|
||||
description: ICMP type of the rule.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 1
|
||||
network:
|
||||
description: Name of the network if C(type=egress)
|
||||
returned: success
|
||||
type: string
|
||||
sample: my_network
|
||||
'''
|
||||
|
||||
try:
|
||||
|
@ -122,38 +217,57 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack):
|
|||
|
||||
def __init__(self, module):
|
||||
AnsibleCloudStack.__init__(self, module)
|
||||
self.result = {
|
||||
'changed': False,
|
||||
}
|
||||
self.firewall_rule = None
|
||||
|
||||
|
||||
def get_end_port(self):
|
||||
if self.module.params.get('end_port'):
|
||||
return self.module.params.get('end_port')
|
||||
return self.module.params.get('start_port')
|
||||
|
||||
|
||||
def get_firewall_rule(self):
|
||||
if not self.firewall_rule:
|
||||
cidr = self.module.params.get('cidr')
|
||||
protocol = self.module.params.get('protocol')
|
||||
start_port = self.module.params.get('start_port')
|
||||
end_port = self.module.params.get('end_port')
|
||||
icmp_code = self.module.params.get('icmp_code')
|
||||
icmp_type = self.module.params.get('icmp_type')
|
||||
cidr = self.module.params.get('cidr')
|
||||
protocol = self.module.params.get('protocol')
|
||||
start_port = self.module.params.get('start_port')
|
||||
end_port = self.get_end_port()
|
||||
icmp_code = self.module.params.get('icmp_code')
|
||||
icmp_type = self.module.params.get('icmp_type')
|
||||
fw_type = self.module.params.get('type')
|
||||
|
||||
if protocol in ['tcp', 'udp'] and not (start_port and end_port):
|
||||
self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol)
|
||||
self.module.fail_json(msg="missing required argument for protocol '%s': start_port or end_port" % protocol)
|
||||
|
||||
if protocol == 'icmp' and not icmp_type:
|
||||
self.module.fail_json(msg="no icmp_type set")
|
||||
self.module.fail_json(msg="missing required argument for protocol 'icmp': icmp_type")
|
||||
|
||||
args = {}
|
||||
args['ipaddressid'] = self.get_ip_address_id()
|
||||
args['projectid'] = self.get_project_id()
|
||||
if protocol == 'all' and fw_type != 'egress':
|
||||
self.module.fail_json(msg="protocol 'all' could only be used for type 'egress'" )
|
||||
|
||||
args = {}
|
||||
args['account'] = self.get_account('name')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['projectid'] = self.get_project('id')
|
||||
|
||||
if fw_type == 'egress':
|
||||
args['networkid'] = self.get_network(key='id')
|
||||
if not args['networkid']:
|
||||
self.module.fail_json(msg="missing required argument for type egress: network")
|
||||
firewall_rules = self.cs.listEgressFirewallRules(**args)
|
||||
else:
|
||||
args['ipaddressid'] = self.get_ip_address('id')
|
||||
if not args['ipaddressid']:
|
||||
self.module.fail_json(msg="missing required argument for type ingress: ip_address")
|
||||
firewall_rules = self.cs.listFirewallRules(**args)
|
||||
|
||||
firewall_rules = self.cs.listFirewallRules(**args)
|
||||
if firewall_rules and 'firewallrule' in firewall_rules:
|
||||
for rule in firewall_rules['firewallrule']:
|
||||
type_match = self._type_cidr_match(rule, cidr)
|
||||
|
||||
protocol_match = self._tcp_udp_match(rule, protocol, start_port, end_port) \
|
||||
or self._icmp_match(rule, protocol, icmp_code, icmp_type)
|
||||
or self._icmp_match(rule, protocol, icmp_code, icmp_type) \
|
||||
or self._egress_all_match(rule, protocol, fw_type)
|
||||
|
||||
if type_match and protocol_match:
|
||||
self.firewall_rule = rule
|
||||
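Two behaviours introduced in this hunk are worth spelling out: end_port falls back to start_port, and each protocol has its own required arguments. A minimal stand-alone sketch of those checks (the parameter dicts are made up; the real module reads them from self.module.params):

def effective_end_port(params):
    # end_port defaults to start_port, as get_end_port() does above.
    return params.get('end_port') or params.get('start_port')

def validate_rule(params):
    protocol = params.get('protocol', 'tcp')
    if protocol in ('tcp', 'udp') and not params.get('start_port'):
        raise ValueError("missing required argument for protocol '%s': start_port or end_port" % protocol)
    if protocol == 'icmp' and not params.get('icmp_type'):
        raise ValueError("missing required argument for protocol 'icmp': icmp_type")
    if protocol == 'all' and params.get('type', 'ingress') != 'egress':
        raise ValueError("protocol 'all' can only be used for type 'egress'")

rule = {'protocol': 'tcp', 'start_port': 80}
validate_rule(rule)
assert effective_end_port(rule) == 80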
|
@ -168,6 +282,12 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack):
|
|||
and end_port == int(rule['endport'])
|
||||
|
||||
|
||||
def _egress_all_match(self, rule, protocol, fw_type):
|
||||
return protocol in ['all'] \
|
||||
and protocol == rule['protocol'] \
|
||||
and fw_type == 'egress'
|
||||
|
||||
|
||||
def _icmp_match(self, rule, protocol, icmp_code, icmp_type):
|
||||
return protocol == 'icmp' \
|
||||
and protocol == rule['protocol'] \
|
||||
|
@ -179,22 +299,58 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack):
|
|||
return cidr == rule['cidrlist']
|
||||
|
||||
|
||||
def get_network(self, key=None, network=None):
|
||||
if not network:
|
||||
network = self.module.params.get('network')
|
||||
|
||||
if not network:
|
||||
return None
|
||||
|
||||
args = {}
|
||||
args['account'] = self.get_account('name')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['zoneid'] = self.get_zone('id')
|
||||
|
||||
networks = self.cs.listNetworks(**args)
|
||||
if not networks:
|
||||
self.module.fail_json(msg="No networks available")
|
||||
|
||||
for n in networks['network']:
|
||||
if network in [ n['displaytext'], n['name'], n['id'] ]:
|
||||
return self._get_by_key(key, n)
|
||||
break
|
||||
self.module.fail_json(msg="Network '%s' not found" % network)
|
||||
|
||||
|
||||
def create_firewall_rule(self):
|
||||
firewall_rule = self.get_firewall_rule()
|
||||
if not firewall_rule:
|
||||
self.result['changed'] = True
|
||||
args = {}
|
||||
args['cidrlist'] = self.module.params.get('cidr')
|
||||
args['protocol'] = self.module.params.get('protocol')
|
||||
args['startport'] = self.module.params.get('start_port')
|
||||
args['endport'] = self.module.params.get('end_port')
|
||||
args['icmptype'] = self.module.params.get('icmp_type')
|
||||
args['icmpcode'] = self.module.params.get('icmp_code')
|
||||
args['ipaddressid'] = self.get_ip_address_id()
|
||||
|
||||
args = {}
|
||||
args['cidrlist'] = self.module.params.get('cidr')
|
||||
args['protocol'] = self.module.params.get('protocol')
|
||||
args['startport'] = self.module.params.get('start_port')
|
||||
args['endport'] = self.get_end_port()
|
||||
args['icmptype'] = self.module.params.get('icmp_type')
|
||||
args['icmpcode'] = self.module.params.get('icmp_code')
|
||||
|
||||
fw_type = self.module.params.get('type')
|
||||
if not self.module.check_mode:
|
||||
firewall_rule = self.cs.createFirewallRule(**args)
|
||||
if fw_type == 'egress':
|
||||
args['networkid'] = self.get_network(key='id')
|
||||
res = self.cs.createEgressFirewallRule(**args)
|
||||
else:
|
||||
args['ipaddressid'] = self.get_ip_address('id')
|
||||
res = self.cs.createFirewallRule(**args)
|
||||
|
||||
if 'errortext' in res:
|
||||
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
|
||||
|
||||
poll_async = self.module.params.get('poll_async')
|
||||
if poll_async:
|
||||
firewall_rule = self._poll_job(res, 'firewallrule')
|
||||
return firewall_rule
|
||||
|
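The create and remove paths now branch on the rule type: egress rules go through the *EgressFirewallRule calls and are keyed by network id, while ingress rules keep using the *FirewallRule calls keyed by the public IP id. A minimal sketch of that dispatch with a stand-in client (only the method names are taken from the module; the client class and payloads are made up):

class FakeCloudStackClient(object):
    # Stand-in for the `cs` library client used by the module.
    def createFirewallRule(self, **kwargs):
        return {'jobid': 'ingress-job', 'args': kwargs}
    def createEgressFirewallRule(self, **kwargs):
        return {'jobid': 'egress-job', 'args': kwargs}

def create_rule(client, fw_type, args):
    if fw_type == 'egress':
        # Egress rules are bound to a network rather than a public IP address.
        return client.createEgressFirewallRule(**args)
    return client.createFirewallRule(**args)

res = create_rule(FakeCloudStackClient(), 'egress',
                  {'networkid': 'net-fake-id', 'protocol': 'all'})
assert res['jobid'] == 'egress-job'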
||||
|
||||
|
@ -202,42 +358,74 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack):
|
|||
firewall_rule = self.get_firewall_rule()
|
||||
if firewall_rule:
|
||||
self.result['changed'] = True
|
||||
args = {}
|
||||
|
||||
args = {}
|
||||
args['id'] = firewall_rule['id']
|
||||
|
||||
fw_type = self.module.params.get('type')
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.deleteFirewallRule(**args)
|
||||
if fw_type == 'egress':
|
||||
res = self.cs.deleteEgressFirewallRule(**args)
|
||||
else:
|
||||
res = self.cs.deleteFirewallRule(**args)
|
||||
|
||||
if 'errortext' in res:
|
||||
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
|
||||
|
||||
poll_async = self.module.params.get('poll_async')
|
||||
if poll_async:
|
||||
res = self._poll_job(res, 'firewallrule')
|
||||
return firewall_rule
|
||||
|
||||
|
||||
def get_result(self, firewall_rule):
|
||||
if firewall_rule:
|
||||
self.result['type'] = self.module.params.get('type')
|
||||
if 'cidrlist' in firewall_rule:
|
||||
self.result['cidr'] = firewall_rule['cidrlist']
|
||||
if 'startport' in firewall_rule:
|
||||
self.result['start_port'] = int(firewall_rule['startport'])
|
||||
if 'endport' in firewall_rule:
|
||||
self.result['end_port'] = int(firewall_rule['endport'])
|
||||
if 'protocol' in firewall_rule:
|
||||
self.result['protocol'] = firewall_rule['protocol']
|
||||
if 'ipaddress' in firewall_rule:
|
||||
self.result['ip_address'] = firewall_rule['ipaddress']
|
||||
if 'icmpcode' in firewall_rule:
|
||||
self.result['icmp_code'] = int(firewall_rule['icmpcode'])
|
||||
if 'icmptype' in firewall_rule:
|
||||
self.result['icmp_type'] = int(firewall_rule['icmptype'])
|
||||
if 'networkid' in firewall_rule:
|
||||
self.result['network'] = self.get_network(key='displaytext', network=firewall_rule['networkid'])
|
||||
return self.result
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
ip_address = dict(required=True, default=None),
|
||||
ip_address = dict(default=None),
|
||||
network = dict(default=None),
|
||||
cidr = dict(default='0.0.0.0/0'),
|
||||
protocol = dict(choices=['tcp', 'udp', 'icmp'], default='tcp'),
|
||||
protocol = dict(choices=['tcp', 'udp', 'icmp', 'all'], default='tcp'),
|
||||
type = dict(choices=['ingress', 'egress'], default='ingress'),
|
||||
icmp_type = dict(type='int', default=None),
|
||||
icmp_code = dict(type='int', default=None),
|
||||
start_port = dict(type='int', default=None),
|
||||
start_port = dict(type='int', aliases=['port'], default=None),
|
||||
end_port = dict(type='int', default=None),
|
||||
state = dict(choices=['present', 'absent'], default='present'),
|
||||
domain = dict(default=None),
|
||||
account = dict(default=None),
|
||||
project = dict(default=None),
|
||||
poll_async = dict(choices=BOOLEANS, default=True),
|
||||
api_key = dict(default=None),
|
||||
api_secret = dict(default=None),
|
||||
api_secret = dict(default=None, no_log=True),
|
||||
api_url = dict(default=None),
|
||||
api_http_method = dict(default='get'),
|
||||
),
|
||||
required_together = (
|
||||
['start_port', 'end_port'],
|
||||
),
|
||||
mutually_exclusive = (
|
||||
['icmp_type', 'start_port'],
|
||||
['icmp_type', 'end_port'],
|
||||
['ip_address', 'network'],
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
@ -259,6 +447,9 @@ def main():
|
|||
except CloudStackException, e:
|
||||
module.fail_json(msg='CloudStackException: %s' % str(e))
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Exception: %s' % str(e))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
# import module snippets
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: Manages instances and virtual machines on Apache CloudStack b
|
|||
description:
|
||||
- Deploy, start, restart, stop and destroy instances on Apache CloudStack, Citrix CloudPlatform and Exoscale.
|
||||
version_added: '2.0'
|
||||
author: René Moser
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
@ -106,6 +106,16 @@ options:
|
|||
required: false
|
||||
default: []
|
||||
aliases: [ 'security_group' ]
|
||||
domain:
|
||||
description:
|
||||
- Domain the instance is related to.
|
||||
required: false
|
||||
default: null
|
||||
account:
|
||||
description:
|
||||
- Account the instance is related to.
|
||||
required: false
|
||||
default: null
|
||||
project:
|
||||
description:
|
||||
- Name of the project the instance is to be deployed in.
|
||||
|
@ -154,7 +164,6 @@ extends_documentation_fragment: cloudstack
|
|||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
# Create an instance on CloudStack from an ISO
|
||||
# NOTE: Names of offerings and ISOs depend on the CloudStack configuration.
|
||||
- local_action:
|
||||
|
@ -252,6 +261,16 @@ ssh_key:
|
|||
returned: success
|
||||
type: string
|
||||
sample: key@work
|
||||
domain:
|
||||
description: Domain the instance is related to.
|
||||
returned: success
|
||||
type: string
|
||||
sample: example domain
|
||||
account:
|
||||
description: Account the instance is related to.
|
||||
returned: success
|
||||
type: string
|
||||
sample: example account
|
||||
project:
|
||||
description: Name of project the instance is related to.
|
||||
returned: success
|
||||
|
@ -352,8 +371,15 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
|
|||
if template and iso:
|
||||
self.module.fail_json(msg="Template are ISO are mutually exclusive.")
|
||||
|
||||
args = {}
|
||||
args['account'] = self.get_account('name')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['zoneid'] = self.get_zone('id')
|
||||
|
||||
if template:
|
||||
templates = self.cs.listTemplates(templatefilter='executable')
|
||||
args['templatefilter'] = 'executable'
|
||||
templates = self.cs.listTemplates(**args)
|
||||
if templates:
|
||||
for t in templates['template']:
|
||||
if template in [ t['displaytext'], t['name'], t['id'] ]:
|
||||
|
@ -361,7 +387,8 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
|
|||
self.module.fail_json(msg="Template '%s' not found" % template)
|
||||
|
||||
elif iso:
|
||||
isos = self.cs.listIsos()
|
||||
args['isofilter'] = 'executable'
|
||||
isos = self.cs.listIsos(**args)
|
||||
if isos:
|
||||
for i in isos['iso']:
|
||||
if iso in [ i['displaytext'], i['name'], i['id'] ]:
|
||||
|
@ -375,7 +402,10 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
|
|||
if not disk_offering:
|
||||
return None
|
||||
|
||||
disk_offerings = self.cs.listDiskOfferings()
|
||||
args = {}
|
||||
args['domainid'] = self.get_domain('id')
|
||||
|
||||
disk_offerings = self.cs.listDiskOfferings(**args)
|
||||
if disk_offerings:
|
||||
for d in disk_offerings['diskoffering']:
|
||||
if disk_offering in [ d['displaytext'], d['name'], d['id'] ]:
|
||||
|
@ -388,9 +418,12 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
|
|||
if not instance:
|
||||
instance_name = self.module.params.get('name')
|
||||
|
||||
args = {}
|
||||
args['projectid'] = self.get_project_id()
|
||||
args['zoneid'] = self.get_zone_id()
|
||||
args = {}
|
||||
args['account'] = self.get_account('name')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['zoneid'] = self.get_zone('id')
|
||||
|
||||
instances = self.cs.listVirtualMachines(**args)
|
||||
if instances:
|
||||
for v in instances['virtualmachine']:
|
||||
|
@ -405,9 +438,12 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
|
|||
if not network_names:
|
||||
return None
|
||||
|
||||
args = {}
|
||||
args['zoneid'] = self.get_zone_id()
|
||||
args['projectid'] = self.get_project_id()
|
||||
args = {}
|
||||
args['account'] = self.get_account('name')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['zoneid'] = self.get_zone('id')
|
||||
|
||||
networks = self.cs.listNetworks(**args)
|
||||
if not networks:
|
||||
self.module.fail_json(msg="No networks available")
|
||||
|
@ -458,9 +494,11 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
|
|||
|
||||
args = {}
|
||||
args['templateid'] = self.get_template_or_iso_id()
|
||||
args['zoneid'] = self.get_zone_id()
|
||||
args['zoneid'] = self.get_zone('id')
|
||||
args['serviceofferingid'] = self.get_service_offering_id()
|
||||
args['projectid'] = self.get_project_id()
|
||||
args['account'] = self.get_account('name')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['diskofferingid'] = self.get_disk_offering_id()
|
||||
args['networkids'] = self.get_network_ids()
|
||||
args['hypervisor'] = self.get_hypervisor()
|
||||
|
@ -498,12 +536,12 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
|
|||
args_instance_update['group'] = self.module.params.get('group')
|
||||
args_instance_update['displayname'] = self.get_display_name()
|
||||
args_instance_update['userdata'] = self.get_user_data()
|
||||
args_instance_update['ostypeid'] = self.get_os_type_id()
|
||||
args_instance_update['ostypeid'] = self.get_os_type('id')
|
||||
|
||||
args_ssh_key = {}
|
||||
args_ssh_key['id'] = instance['id']
|
||||
args_ssh_key['keypair'] = self.module.params.get('ssh_key')
|
||||
args_ssh_key['projectid'] = self.get_project_id()
|
||||
args_ssh_key['projectid'] = self.get_project('id')
|
||||
|
||||
if self._has_changed(args_service_offering, instance) or \
|
||||
self._has_changed(args_instance_update, instance) or \
|
||||
|
@ -668,6 +706,10 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
|
|||
self.result['display_name'] = instance['displayname']
|
||||
if 'group' in instance:
|
||||
self.result['group'] = instance['group']
|
||||
if 'domain' in instance:
|
||||
self.result['domain'] = instance['domain']
|
||||
if 'account' in instance:
|
||||
self.result['account'] = instance['account']
|
||||
if 'project' in instance:
|
||||
self.result['project'] = instance['project']
|
||||
if 'publicip' in instance:
|
||||
|
@ -732,6 +774,8 @@ def main():
|
|||
hypervisor = dict(default=None),
|
||||
security_groups = dict(type='list', aliases=[ 'security_group' ], default=[]),
|
||||
affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]),
|
||||
domain = dict(default=None),
|
||||
account = dict(default=None),
|
||||
project = dict(default=None),
|
||||
user_data = dict(default=None),
|
||||
zone = dict(default=None),
|
||||
|
@ -740,7 +784,7 @@ def main():
|
|||
tags = dict(type='list', aliases=[ 'tag' ], default=None),
|
||||
poll_async = dict(choices=BOOLEANS, default=True),
|
||||
api_key = dict(default=None),
|
||||
api_secret = dict(default=None),
|
||||
api_secret = dict(default=None, no_log=True),
|
||||
api_url = dict(default=None),
|
||||
api_http_method = dict(default='get'),
|
||||
),
|
||||
|
@ -781,6 +825,9 @@ def main():
|
|||
except CloudStackException, e:
|
||||
module.fail_json(msg='CloudStackException: %s' % str(e))
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Exception: %s' % str(e))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
# import module snippets
|
||||
|
|
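A pattern repeated throughout the cs_instance changes above is that every list call is now scoped by account, domain, project and zone before filtering by name. A minimal sketch of building that argument dict with stand-in getters (the lambdas replace the AnsibleCloudStack helper methods; values are made up):

def scoped_args(get_account, get_domain, get_project, get_zone):
    # Mirrors the args dict built before listTemplates/listIsos/listNetworks/
    # listVirtualMachines in the hunks above.
    return {
        'account': get_account('name'),
        'domainid': get_domain('id'),
        'projectid': get_project('id'),
        'zoneid': get_zone('id'),
    }

args = scoped_args(lambda key: 'example account',
                   lambda key: 'fake-domain-id',
                   lambda key: None,
                   lambda key: 'fake-zone-id')
assert args['domainid'] == 'fake-domain-id'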
|
@ -25,7 +25,7 @@ short_description: Manages instance groups on Apache CloudStack based clouds.
|
|||
description:
|
||||
- Create and remove instance groups.
|
||||
version_added: '2.0'
|
||||
author: René Moser
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
@ -56,7 +56,6 @@ extends_documentation_fragment: cloudstack
|
|||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
# Create an instance group
|
||||
- local_action:
|
||||
module: cs_instancegroup
|
||||
|
@ -197,7 +196,7 @@ def main():
|
|||
account = dict(default=None),
|
||||
project = dict(default=None),
|
||||
api_key = dict(default=None),
|
||||
api_secret = dict(default=None),
|
||||
api_secret = dict(default=None, no_log=True),
|
||||
api_url = dict(default=None),
|
||||
api_http_method = dict(default='get'),
|
||||
),
|
||||
|
@ -221,6 +220,9 @@ def main():
|
|||
except CloudStackException, e:
|
||||
module.fail_json(msg='CloudStackException: %s' % str(e))
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Exception: %s' % str(e))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
# import module snippets
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: Manages ISO images on Apache CloudStack based clouds.
|
|||
description:
|
||||
- Register and remove ISO images.
|
||||
version_added: '2.0'
|
||||
author: René Moser
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
@ -73,6 +73,16 @@ options:
|
|||
- Register the ISO to be bootable. Only used if C(state) is present.
|
||||
required: false
|
||||
default: true
|
||||
domain:
|
||||
description:
|
||||
- Domain the ISO is related to.
|
||||
required: false
|
||||
default: null
|
||||
account:
|
||||
description:
|
||||
- Account the ISO is related to.
|
||||
required: false
|
||||
default: null
|
||||
project:
|
||||
description:
|
||||
- Name of the project the ISO is to be registered in.
|
||||
|
@ -99,7 +109,6 @@ extends_documentation_fragment: cloudstack
|
|||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
# Register an ISO if ISO name does not already exist.
|
||||
- local_action:
|
||||
module: cs_iso
|
||||
|
@ -169,6 +178,21 @@ created:
|
|||
returned: success
|
||||
type: string
|
||||
sample: 2015-03-29T14:57:06+0200
|
||||
domain:
|
||||
description: Domain the ISO is related to.
|
||||
returned: success
|
||||
type: string
|
||||
sample: example domain
|
||||
account:
|
||||
description: Account the ISO is related to.
|
||||
returned: success
|
||||
type: string
|
||||
sample: example account
|
||||
project:
|
||||
description: Project the ISO is related to.
|
||||
returned: success
|
||||
type: string
|
||||
sample: example project
|
||||
'''
|
||||
|
||||
try:
|
||||
|
@ -185,20 +209,26 @@ class AnsibleCloudStackIso(AnsibleCloudStack):
|
|||
|
||||
def __init__(self, module):
|
||||
AnsibleCloudStack.__init__(self, module)
|
||||
self.result = {
|
||||
'changed': False,
|
||||
}
|
||||
self.iso = None
|
||||
|
||||
def register_iso(self):
|
||||
iso = self.get_iso()
|
||||
if not iso:
|
||||
args = {}
|
||||
args['zoneid'] = self.get_zone_id()
|
||||
args['projectid'] = self.get_project_id()
|
||||
|
||||
args['bootable'] = self.module.params.get('bootable')
|
||||
args['ostypeid'] = self.get_os_type_id()
|
||||
args = {}
|
||||
args['zoneid'] = self.get_zone('id')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['account'] = self.get_account('name')
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['bootable'] = self.module.params.get('bootable')
|
||||
args['ostypeid'] = self.get_os_type('id')
|
||||
args['name'] = self.module.params.get('name')
|
||||
args['displaytext'] = self.module.params.get('name')
|
||||
args['checksum'] = self.module.params.get('checksum')
|
||||
args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable')
|
||||
args['isfeatured'] = self.module.params.get('is_featured')
|
||||
args['ispublic'] = self.module.params.get('is_public')
|
||||
|
||||
if args['bootable'] and not args['ostypeid']:
|
||||
self.module.fail_json(msg="OS type 'os_type' is requried if 'bootable=true'.")
|
||||
|
||||
|
@ -206,13 +236,6 @@ class AnsibleCloudStackIso(AnsibleCloudStack):
|
|||
if not args['url']:
|
||||
self.module.fail_json(msg="URL is requried.")
|
||||
|
||||
args['name'] = self.module.params.get('name')
|
||||
args['displaytext'] = self.module.params.get('name')
|
||||
args['checksum'] = self.module.params.get('checksum')
|
||||
args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable')
|
||||
args['isfeatured'] = self.module.params.get('is_featured')
|
||||
args['ispublic'] = self.module.params.get('is_public')
|
||||
|
||||
self.result['changed'] = True
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.registerIso(**args)
|
||||
|
@ -222,11 +245,14 @@ class AnsibleCloudStackIso(AnsibleCloudStack):
|
|||
|
||||
def get_iso(self):
|
||||
if not self.iso:
|
||||
args = {}
|
||||
args['isready'] = self.module.params.get('is_ready')
|
||||
args['isofilter'] = self.module.params.get('iso_filter')
|
||||
args['projectid'] = self.get_project_id()
|
||||
args['zoneid'] = self.get_zone_id()
|
||||
|
||||
args = {}
|
||||
args['isready'] = self.module.params.get('is_ready')
|
||||
args['isofilter'] = self.module.params.get('iso_filter')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['account'] = self.get_account('name')
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['zoneid'] = self.get_zone('id')
|
||||
|
||||
# if checksum is set, we only look on that.
|
||||
checksum = self.module.params.get('checksum')
|
||||
|
@ -249,10 +275,12 @@ class AnsibleCloudStackIso(AnsibleCloudStack):
|
|||
iso = self.get_iso()
|
||||
if iso:
|
||||
self.result['changed'] = True
|
||||
args = {}
|
||||
args['id'] = iso['id']
|
||||
args['projectid'] = self.get_project_id()
|
||||
args['zoneid'] = self.get_zone_id()
|
||||
|
||||
args = {}
|
||||
args['id'] = iso['id']
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['zoneid'] = self.get_zone('id')
|
||||
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.deleteIso(**args)
|
||||
return iso
|
||||
|
@ -274,17 +302,25 @@ class AnsibleCloudStackIso(AnsibleCloudStack):
|
|||
self.result['is_ready'] = iso['isready']
|
||||
if 'created' in iso:
|
||||
self.result['created'] = iso['created']
|
||||
if 'project' in iso:
|
||||
self.result['project'] = iso['project']
|
||||
if 'domain' in iso:
|
||||
self.result['domain'] = iso['domain']
|
||||
if 'account' in iso:
|
||||
self.result['account'] = iso['account']
|
||||
return self.result
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
name = dict(required=True, default=None),
|
||||
name = dict(required=True),
|
||||
url = dict(default=None),
|
||||
os_type = dict(default=None),
|
||||
zone = dict(default=None),
|
||||
iso_filter = dict(default='self', choices=[ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]),
|
||||
domain = dict(default=None),
|
||||
account = dict(default=None),
|
||||
project = dict(default=None),
|
||||
checksum = dict(default=None),
|
||||
is_ready = dict(choices=BOOLEANS, default=False),
|
||||
|
@ -293,7 +329,7 @@ def main():
|
|||
is_dynamically_scalable = dict(choices=BOOLEANS, default=False),
|
||||
state = dict(choices=['present', 'absent'], default='present'),
|
||||
api_key = dict(default=None),
|
||||
api_secret = dict(default=None),
|
||||
api_secret = dict(default=None, no_log=True),
|
||||
api_url = dict(default=None),
|
||||
api_http_method = dict(default='get'),
|
||||
),
|
||||
|
@ -317,6 +353,9 @@ def main():
|
|||
except CloudStackException, e:
|
||||
module.fail_json(msg='CloudStackException: %s' % str(e))
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Exception: %s' % str(e))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
# import module snippets
|
||||
|
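The reordered register_iso() above enforces two preconditions before calling registerIso: a bootable ISO needs an os_type, and a URL is always required. A minimal stand-alone sketch of those checks (the args dict and URL are made up):

def validate_register_args(args):
    if args.get('bootable') and not args.get('ostypeid'):
        raise ValueError("OS type 'os_type' is required if 'bootable=true'.")
    if not args.get('url'):
        raise ValueError("URL is required.")

validate_register_args({'bootable': False,
                        'url': 'http://example.com/fake.iso'})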
|
436
cloud/cloudstack/cs_portforward.py
Normal file
|
@ -0,0 +1,436 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# (c) 2015, René Moser <mail@renemoser.net>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: cs_portforward
|
||||
short_description: Manages port forwarding rules on Apache CloudStack based clouds.
|
||||
description:
|
||||
- Create, update and remove port forwarding rules.
|
||||
version_added: '2.0'
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
options:
|
||||
ip_address:
|
||||
description:
|
||||
- Public IP address the rule is assigned to.
|
||||
required: true
|
||||
vm:
|
||||
description:
|
||||
- Name of the virtual machine the port forwarding rule is created for.
|
||||
- Required if C(state=present).
|
||||
required: false
|
||||
default: null
|
||||
state:
|
||||
description:
|
||||
- State of the port forwarding rule.
|
||||
required: false
|
||||
default: 'present'
|
||||
choices: [ 'present', 'absent' ]
|
||||
protocol:
|
||||
description:
|
||||
- Protocol of the port forwarding rule.
|
||||
required: false
|
||||
default: 'tcp'
|
||||
choices: [ 'tcp', 'udp' ]
|
||||
public_port:
|
||||
description:
|
||||
- Start public port for this rule.
|
||||
required: true
|
||||
public_end_port:
|
||||
description:
|
||||
- End public port for this rule.
|
||||
- If not specified, equal to C(public_port).
|
||||
required: false
|
||||
default: null
|
||||
private_port:
|
||||
description:
|
||||
- Start private port for this rule.
|
||||
required: true
|
||||
private_end_port:
|
||||
description:
|
||||
- End private port for this rule.
|
||||
- If not specified, equal to C(private_port).
|
||||
required: false
|
||||
default: null
|
||||
open_firewall:
|
||||
description:
|
||||
- Whether the firewall rule for the public port should be created while creating the new rule.
|
||||
- Use M(cs_firewall) for managing firewall rules.
|
||||
required: false
|
||||
default: false
|
||||
vm_guest_ip:
|
||||
description:
|
||||
- VM guest NIC secondary IP address for the port forwarding rule.
|
||||
required: false
|
||||
default: false
|
||||
domain:
|
||||
description:
|
||||
- Domain the C(vm) is related to.
|
||||
required: false
|
||||
default: null
|
||||
account:
|
||||
description:
|
||||
- Account the C(vm) is related to.
|
||||
required: false
|
||||
default: null
|
||||
project:
|
||||
description:
|
||||
- Name of the project the C(vm) is located in.
|
||||
required: false
|
||||
default: null
|
||||
zone:
|
||||
description:
|
||||
- Name of the zone the virtual machine is in. If not set, default zone is used.
|
||||
required: false
|
||||
default: null
|
||||
poll_async:
|
||||
description:
|
||||
- Poll async jobs until job has finished.
|
||||
required: false
|
||||
default: true
|
||||
extends_documentation_fragment: cloudstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# 1.2.3.4:80 -> web01:8080
|
||||
- local_action:
|
||||
module: cs_portforward
|
||||
ip_address: 1.2.3.4
|
||||
vm: web01
|
||||
public_port: 80
|
||||
private_port: 8080
|
||||
|
||||
|
||||
# forward SSH and open firewall
|
||||
- local_action:
|
||||
module: cs_portforward
|
||||
ip_address: '{{ public_ip }}'
|
||||
vm: '{{ inventory_hostname }}'
|
||||
public_port: '{{ ansible_ssh_port }}'
|
||||
private_port: 22
|
||||
open_firewall: true
|
||||
|
||||
|
||||
# forward DNS traffic, but do not open firewall
|
||||
- local_action:
|
||||
module: cs_portforward
|
||||
ip_address: 1.2.3.4
|
||||
vm: '{{ inventory_hostname }}'
|
||||
public_port: 53
|
||||
private_port: 53
|
||||
protocol: udp
|
||||
open_firewall: true
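
# Hypothetical example (not part of the original module documentation):
# forward to a secondary IP on the VM's default NIC via vm_guest_ip.
# The secondary IP must already be assigned to the NIC; the address used
# here is illustrative only.
- local_action:
    module: cs_portforward
    ip_address: 1.2.3.4
    vm: web01
    public_port: 443
    private_port: 8443
    vm_guest_ip: 10.101.65.200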
|
||||
|
||||
|
||||
# remove ssh port forwarding
|
||||
- local_action:
|
||||
module: cs_portforward
|
||||
ip_address: 1.2.3.4
|
||||
public_port: 22
|
||||
private_port: 22
|
||||
state: absent
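
# Hypothetical example (not part of the original module documentation):
# forward a whole public port range to a matching private range by also
# setting the public_end_port/private_end_port options described above.
- local_action:
    module: cs_portforward
    ip_address: 1.2.3.4
    vm: web01
    public_port: 8000
    public_end_port: 8010
    private_port: 8000
    private_end_port: 8010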
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
ip_address:
|
||||
description: Public IP address.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 1.2.3.4
|
||||
protocol:
|
||||
description: Protocol.
|
||||
returned: success
|
||||
type: string
|
||||
sample: tcp
|
||||
private_port:
|
||||
description: Private start port.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 80
|
||||
private_end_port:
|
||||
description: Private end port.
|
||||
returned: success
|
||||
type: int
|
||||
public_port:
|
||||
description: Public start port.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 80
|
||||
public_end_port:
|
||||
description: Public end port.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 80
|
||||
tags:
|
||||
description: Tags related to the port forwarding.
|
||||
returned: success
|
||||
type: list
|
||||
sample: []
|
||||
vm_name:
|
||||
description: Name of the virtual machine.
|
||||
returned: success
|
||||
type: string
|
||||
sample: web-01
|
||||
vm_display_name:
|
||||
description: Display name of the virtual machine.
|
||||
returned: success
|
||||
type: string
|
||||
sample: web-01
|
||||
vm_guest_ip:
|
||||
description: IP of the virtual machine.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 10.101.65.152
|
||||
'''
|
||||
|
||||
|
||||
try:
|
||||
from cs import CloudStack, CloudStackException, read_config
|
||||
has_lib_cs = True
|
||||
except ImportError:
|
||||
has_lib_cs = False
|
||||
|
||||
# import cloudstack common
|
||||
from ansible.module_utils.cloudstack import *
|
||||
|
||||
|
||||
class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
|
||||
|
||||
def __init__(self, module):
|
||||
AnsibleCloudStack.__init__(self, module)
|
||||
self.portforwarding_rule = None
|
||||
self.vm_default_nic = None
|
||||
|
||||
|
||||
def get_public_end_port(self):
|
||||
if not self.module.params.get('public_end_port'):
|
||||
return self.module.params.get('public_port')
|
||||
return self.module.params.get('public_end_port')
|
||||
|
||||
|
||||
def get_private_end_port(self):
|
||||
if not self.module.params.get('private_end_port'):
|
||||
return self.module.params.get('private_port')
|
||||
return self.module.params.get('private_end_port')
|
||||
|
||||
|
||||
def get_vm_guest_ip(self):
|
||||
vm_guest_ip = self.module.params.get('vm_guest_ip')
|
||||
default_nic = self.get_vm_default_nic()
|
||||
|
||||
if not vm_guest_ip:
|
||||
return default_nic['ipaddress']
|
||||
|
||||
for secondary_ip in default_nic['secondaryip']:
|
||||
if vm_guest_ip == secondary_ip['ipaddress']:
|
||||
return vm_guest_ip
|
||||
self.module.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip)
|
||||
|
||||
|
||||
def get_vm_default_nic(self):
|
||||
if self.vm_default_nic:
|
||||
return self.vm_default_nic
|
||||
|
||||
nics = self.cs.listNics(virtualmachineid=self.get_vm(key='id'))
|
||||
if nics:
|
||||
for n in nics['nic']:
|
||||
if n['isdefault']:
|
||||
self.vm_default_nic = n
|
||||
return self.vm_default_nic
|
||||
self.module.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm'))
|
||||
|
||||
|
||||
def get_portforwarding_rule(self):
|
||||
if not self.portforwarding_rule:
|
||||
protocol = self.module.params.get('protocol')
|
||||
public_port = self.module.params.get('public_port')
|
||||
public_end_port = self.get_public_end_port()
|
||||
private_port = self.module.params.get('private_port')
|
||||
private_end_port = self.get_private_end_port()
|
||||
|
||||
args = {}
|
||||
args['ipaddressid'] = self.get_ip_address(key='id')
|
||||
args['projectid'] = self.get_project(key='id')
|
||||
portforwarding_rules = self.cs.listPortForwardingRules(**args)
|
||||
|
||||
if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
|
||||
for rule in portforwarding_rules['portforwardingrule']:
|
||||
if protocol == rule['protocol'] \
|
||||
and public_port == int(rule['publicport']):
|
||||
self.portforwarding_rule = rule
|
||||
break
|
||||
return self.portforwarding_rule
|
||||
|
||||
|
||||
def present_portforwarding_rule(self):
|
||||
portforwarding_rule = self.get_portforwarding_rule()
|
||||
if portforwarding_rule:
|
||||
portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule)
|
||||
else:
|
||||
portforwarding_rule = self.create_portforwarding_rule()
|
||||
return portforwarding_rule
|
||||
|
||||
|
||||
def create_portforwarding_rule(self):
|
||||
args = {}
|
||||
args['protocol'] = self.module.params.get('protocol')
|
||||
args['publicport'] = self.module.params.get('public_port')
|
||||
args['publicendport'] = self.get_public_end_port()
|
||||
args['privateport'] = self.module.params.get('private_port')
|
||||
args['privateendport'] = self.get_private_end_port()
|
||||
args['openfirewall'] = self.module.params.get('open_firewall')
|
||||
args['vmguestip'] = self.get_vm_guest_ip()
|
||||
args['ipaddressid'] = self.get_ip_address(key='id')
|
||||
args['virtualmachineid'] = self.get_vm(key='id')
|
||||
|
||||
portforwarding_rule = None
|
||||
self.result['changed'] = True
|
||||
if not self.module.check_mode:
|
||||
portforwarding_rule = self.cs.createPortForwardingRule(**args)
|
||||
poll_async = self.module.params.get('poll_async')
|
||||
if poll_async:
|
||||
portforwarding_rule = self._poll_job(portforwarding_rule, 'portforwardingrule')
|
||||
return portforwarding_rule
|
||||
|
||||
|
||||
def update_portforwarding_rule(self, portforwarding_rule):
|
||||
args = {}
|
||||
args['protocol'] = self.module.params.get('protocol')
|
||||
args['publicport'] = self.module.params.get('public_port')
|
||||
args['publicendport'] = self.get_public_end_port()
|
||||
args['privateport'] = self.module.params.get('private_port')
|
||||
args['privateendport'] = self.get_private_end_port()
|
||||
args['openfirewall'] = self.module.params.get('open_firewall')
|
||||
args['vmguestip'] = self.get_vm_guest_ip()
|
||||
args['ipaddressid'] = self.get_ip_address(key='id')
|
||||
args['virtualmachineid'] = self.get_vm(key='id')
|
||||
|
||||
if self._has_changed(args, portforwarding_rule):
|
||||
self.result['changed'] = True
|
||||
if not self.module.check_mode:
|
||||
# API broken in 4.2.1?, workaround using remove/create instead of update
|
||||
# portforwarding_rule = self.cs.updatePortForwardingRule(**args)
|
||||
self.absent_portforwarding_rule()
|
||||
portforwarding_rule = self.cs.createPortForwardingRule(**args)
|
||||
poll_async = self.module.params.get('poll_async')
|
||||
if poll_async:
|
||||
portforwarding_rule = self._poll_job(portforwarding_rule, 'portforwardingrule')
|
||||
return portforwarding_rule
|
||||
|
||||
|
||||
def absent_portforwarding_rule(self):
|
||||
portforwarding_rule = self.get_portforwarding_rule()
|
||||
|
||||
if portforwarding_rule:
|
||||
self.result['changed'] = True
|
||||
args = {}
|
||||
args['id'] = portforwarding_rule['id']
|
||||
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.deletePortForwardingRule(**args)
|
||||
poll_async = self.module.params.get('poll_async')
|
||||
if poll_async:
|
||||
self._poll_job(res, 'portforwardingrule')
|
||||
return portforwarding_rule
|
||||
|
||||
|
||||
def get_result(self, portforwarding_rule):
|
||||
if portforwarding_rule:
|
||||
if 'id' in portforwarding_rule:
|
||||
self.result['id'] = portforwarding_rule['id']
|
||||
if 'virtualmachinedisplayname' in portforwarding_rule:
|
||||
self.result['vm_display_name'] = portforwarding_rule['virtualmachinedisplayname']
|
||||
if 'virtualmachinename' in portforwarding_rule:
|
||||
self.result['vm_name'] = portforwarding_rule['virtualmachinename']
|
||||
if 'ipaddress' in portforwarding_rule:
|
||||
self.result['ip_address'] = portforwarding_rule['ipaddress']
|
||||
if 'vmguestip' in portforwarding_rule:
|
||||
self.result['vm_guest_ip'] = portforwarding_rule['vmguestip']
|
||||
if 'publicport' in portforwarding_rule:
|
||||
self.result['public_port'] = int(portforwarding_rule['publicport'])
|
||||
if 'publicendport' in portforwarding_rule:
|
||||
self.result['public_end_port'] = int(portforwarding_rule['publicendport'])
|
||||
if 'privateport' in portforwarding_rule:
|
||||
self.result['private_port'] = int(portforwarding_rule['privateport'])
|
||||
if 'privateendport' in portforwarding_rule:
|
||||
self.result['private_end_port'] = int(portforwarding_rule['privateendport'])
|
||||
if 'protocol' in portforwarding_rule:
|
||||
self.result['protocol'] = portforwarding_rule['protocol']
|
||||
if 'tags' in portforwarding_rule:
|
||||
self.result['tags'] = []
|
||||
for tag in portforwarding_rule['tags']:
|
||||
result_tag = {}
|
||||
result_tag['key'] = tag['key']
|
||||
result_tag['value'] = tag['value']
|
||||
self.result['tags'].append(result_tag)
|
||||
return self.result
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
ip_address = dict(required=True),
|
||||
protocol = dict(choices=['tcp', 'udp'], default='tcp'),
|
||||
public_port = dict(type='int', required=True),
|
||||
public_end_port = dict(type='int', default=None),
|
||||
private_port = dict(type='int', required=True),
|
||||
private_end_port = dict(type='int', default=None),
|
||||
state = dict(choices=['present', 'absent'], default='present'),
|
||||
open_firewall = dict(choices=BOOLEANS, default=False),
|
||||
vm_guest_ip = dict(default=None),
|
||||
vm = dict(default=None),
|
||||
zone = dict(default=None),
|
||||
domain = dict(default=None),
|
||||
account = dict(default=None),
|
||||
project = dict(default=None),
|
||||
poll_async = dict(choices=BOOLEANS, default=True),
|
||||
api_key = dict(default=None),
|
||||
api_secret = dict(default=None, no_log=True),
|
||||
api_url = dict(default=None),
|
||||
api_http_method = dict(default='get'),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
if not has_lib_cs:
|
||||
module.fail_json(msg="python library cs required: pip install cs")
|
||||
|
||||
try:
|
||||
acs_pf = AnsibleCloudStackPortforwarding(module)
|
||||
state = module.params.get('state')
|
||||
if state in ['absent']:
|
||||
pf_rule = acs_pf.absent_portforwarding_rule()
|
||||
else:
|
||||
pf_rule = acs_pf.present_portforwarding_rule()
|
||||
|
||||
result = acs_pf.get_result(pf_rule)
|
||||
|
||||
except CloudStackException, e:
|
||||
module.fail_json(msg='CloudStackException: %s' % str(e))
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Exception: %s' % str(e))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
|
@ -25,7 +25,7 @@ short_description: Manages security groups on Apache CloudStack based clouds.
|
|||
description:
|
||||
- Create and remove security groups.
|
||||
version_added: '2.0'
|
||||
author: René Moser
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
@ -51,7 +51,6 @@ extends_documentation_fragment: cloudstack
|
|||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
# Create a security group
|
||||
- local_action:
|
||||
module: cs_securitygroup
|
||||
|
@ -94,9 +93,6 @@ class AnsibleCloudStackSecurityGroup(AnsibleCloudStack):
|
|||
|
||||
def __init__(self, module):
|
||||
AnsibleCloudStack.__init__(self, module)
|
||||
self.result = {
|
||||
'changed': False,
|
||||
}
|
||||
self.security_group = None
|
||||
|
||||
|
||||
|
@ -104,7 +100,7 @@ class AnsibleCloudStackSecurityGroup(AnsibleCloudStack):
|
|||
if not self.security_group:
|
||||
sg_name = self.module.params.get('name')
|
||||
args = {}
|
||||
args['projectid'] = self.get_project_id()
|
||||
args['projectid'] = self.get_project('id')
|
||||
sgs = self.cs.listSecurityGroups(**args)
|
||||
if sgs:
|
||||
for s in sgs['securitygroup']:
|
||||
|
@ -121,7 +117,7 @@ class AnsibleCloudStackSecurityGroup(AnsibleCloudStack):
|
|||
|
||||
args = {}
|
||||
args['name'] = self.module.params.get('name')
|
||||
args['projectid'] = self.get_project_id()
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['description'] = self.module.params.get('description')
|
||||
|
||||
if not self.module.check_mode:
|
||||
|
@ -140,7 +136,7 @@ class AnsibleCloudStackSecurityGroup(AnsibleCloudStack):
|
|||
|
||||
args = {}
|
||||
args['name'] = self.module.params.get('name')
|
||||
args['projectid'] = self.get_project_id()
|
||||
args['projectid'] = self.get_project('id')
|
||||
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.deleteSecurityGroup(**args)
|
||||
|
@ -167,7 +163,7 @@ def main():
|
|||
state = dict(choices=['present', 'absent'], default='present'),
|
||||
project = dict(default=None),
|
||||
api_key = dict(default=None),
|
||||
api_secret = dict(default=None),
|
||||
api_secret = dict(default=None, no_log=True),
|
||||
api_url = dict(default=None),
|
||||
api_http_method = dict(default='get'),
|
||||
),
|
||||
|
@ -191,6 +187,9 @@ def main():
|
|||
except CloudStackException, e:
|
||||
module.fail_json(msg='CloudStackException: %s' % str(e))
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Exception: %s' % str(e))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
# import module snippets
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: Manages security group rules on Apache CloudStack based cloud
|
|||
description:
|
||||
- Add and remove security group rules.
|
||||
version_added: '2.0'
|
||||
author: René Moser
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
options:
|
||||
security_group:
|
||||
description:
|
||||
|
@ -194,9 +194,6 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
|
|||
|
||||
def __init__(self, module):
|
||||
AnsibleCloudStack.__init__(self, module)
|
||||
self.result = {
|
||||
'changed': False,
|
||||
}
|
||||
|
||||
|
||||
def _tcp_udp_match(self, rule, protocol, start_port, end_port):
|
||||
|
@ -229,18 +226,21 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
|
|||
and cidr == rule['cidr']
|
||||
|
||||
|
||||
def get_end_port(self):
|
||||
if self.module.params.get('end_port'):
|
||||
return self.module.params.get('end_port')
|
||||
return self.module.params.get('start_port')
|
||||
|
||||
|
||||
def _get_rule(self, rules):
|
||||
user_security_group_name = self.module.params.get('user_security_group')
|
||||
cidr = self.module.params.get('cidr')
|
||||
protocol = self.module.params.get('protocol')
|
||||
start_port = self.module.params.get('start_port')
|
||||
end_port = self.module.params.get('end_port')
|
||||
end_port = self.get_end_port()
|
||||
icmp_code = self.module.params.get('icmp_code')
|
||||
icmp_type = self.module.params.get('icmp_type')
|
||||
|
||||
if not end_port:
|
||||
end_port = start_port
|
||||
|
||||
if protocol in ['tcp', 'udp'] and not (start_port and end_port):
|
||||
self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol)
|
||||
|
||||
|
@ -268,7 +268,7 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
|
|||
security_group_name = self.module.params.get('security_group')
|
||||
args = {}
|
||||
args['securitygroupname'] = security_group_name
|
||||
args['projectid'] = self.get_project_id()
|
||||
args['projectid'] = self.get_project('id')
|
||||
sgs = self.cs.listSecurityGroups(**args)
|
||||
if not sgs or 'securitygroup' not in sgs:
|
||||
self.module.fail_json(msg="security group '%s' not found" % security_group_name)
|
||||
|
@ -295,26 +295,23 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
|
|||
|
||||
args['protocol'] = self.module.params.get('protocol')
|
||||
args['startport'] = self.module.params.get('start_port')
|
||||
args['endport'] = self.module.params.get('end_port')
|
||||
args['endport'] = self.get_end_port()
|
||||
args['icmptype'] = self.module.params.get('icmp_type')
|
||||
args['icmpcode'] = self.module.params.get('icmp_code')
|
||||
args['projectid'] = self.get_project_id()
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['securitygroupid'] = security_group['id']
|
||||
|
||||
if not args['endport']:
|
||||
args['endport'] = args['startport']
|
||||
|
||||
rule = None
|
||||
res = None
|
||||
type = self.module.params.get('type')
|
||||
if type == 'ingress':
|
||||
sg_type = self.module.params.get('type')
|
||||
if sg_type == 'ingress':
|
||||
rule = self._get_rule(security_group['ingressrule'])
|
||||
if not rule:
|
||||
self.result['changed'] = True
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.authorizeSecurityGroupIngress(**args)
|
||||
|
||||
elif type == 'egress':
|
||||
elif sg_type == 'egress':
|
||||
rule = self._get_rule(security_group['egressrule'])
|
||||
if not rule:
|
||||
self.result['changed'] = True
|
||||
|
@ -327,22 +324,25 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
|
|||
poll_async = self.module.params.get('poll_async')
|
||||
if res and poll_async:
|
||||
security_group = self._poll_job(res, 'securitygroup')
|
||||
return security_group
|
||||
key = sg_type + "rule" # ingressrule / egressrule
|
||||
if key in security_group:
|
||||
rule = security_group[key][0]
|
||||
return rule
|
||||
|
||||
|
||||
def remove_rule(self):
|
||||
security_group = self.get_security_group()
|
||||
rule = None
|
||||
res = None
|
||||
type = self.module.params.get('type')
|
||||
if type == 'ingress':
|
||||
sg_type = self.module.params.get('type')
|
||||
if sg_type == 'ingress':
|
||||
rule = self._get_rule(security_group['ingressrule'])
|
||||
if rule:
|
||||
self.result['changed'] = True
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.revokeSecurityGroupIngress(id=rule['ruleid'])
|
||||
|
||||
elif type == 'egress':
|
||||
elif sg_type == 'egress':
|
||||
rule = self._get_rule(security_group['egressrule'])
|
||||
if rule:
|
||||
self.result['changed'] = True
|
||||
|
@ -355,34 +355,30 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
|
|||
poll_async = self.module.params.get('poll_async')
|
||||
if res and poll_async:
|
||||
res = self._poll_job(res, 'securitygroup')
|
||||
return security_group
|
||||
return rule
|
||||
|
||||
|
||||
def get_result(self, security_group_rule):
|
||||
type = self.module.params.get('type')
|
||||
|
||||
key = 'ingressrule'
|
||||
if type == 'egress':
|
||||
key = 'egressrule'
|
||||
|
||||
self.result['type'] = type
|
||||
self.result['type'] = self.module.params.get('type')
|
||||
self.result['security_group'] = self.module.params.get('security_group')
|
||||
|
||||
if key in security_group_rule and security_group_rule[key]:
|
||||
if 'securitygroupname' in security_group_rule[key][0]:
|
||||
self.result['user_security_group'] = security_group_rule[key][0]['securitygroupname']
|
||||
if 'cidr' in security_group_rule[key][0]:
|
||||
self.result['cidr'] = security_group_rule[key][0]['cidr']
|
||||
if 'protocol' in security_group_rule[key][0]:
|
||||
self.result['protocol'] = security_group_rule[key][0]['protocol']
|
||||
if 'startport' in security_group_rule[key][0]:
|
||||
self.result['start_port'] = security_group_rule[key][0]['startport']
|
||||
if 'endport' in security_group_rule[key][0]:
|
||||
self.result['end_port'] = security_group_rule[key][0]['endport']
|
||||
if 'icmpcode' in security_group_rule[key][0]:
|
||||
self.result['icmp_code'] = security_group_rule[key][0]['icmpcode']
|
||||
if 'icmptype' in security_group_rule[key][0]:
|
||||
self.result['icmp_type'] = security_group_rule[key][0]['icmptype']
|
||||
|
||||
if security_group_rule:
|
||||
rule = security_group_rule
|
||||
if 'securitygroupname' in rule:
|
||||
self.result['user_security_group'] = rule['securitygroupname']
|
||||
if 'cidr' in rule:
|
||||
self.result['cidr'] = rule['cidr']
|
||||
if 'protocol' in rule:
|
||||
self.result['protocol'] = rule['protocol']
|
||||
if 'startport' in rule:
|
||||
self.result['start_port'] = rule['startport']
|
||||
if 'endport' in rule:
|
||||
self.result['end_port'] = rule['endport']
|
||||
if 'icmpcode' in rule:
|
||||
self.result['icmp_code'] = rule['icmpcode']
|
||||
if 'icmptype' in rule:
|
||||
self.result['icmp_type'] = rule['icmptype']
|
||||
return self.result
|
||||
|
||||
|
||||
|
@ -402,7 +398,7 @@ def main():
|
|||
project = dict(default=None),
|
||||
poll_async = dict(choices=BOOLEANS, default=True),
|
||||
api_key = dict(default=None),
|
||||
api_secret = dict(default=None),
|
||||
api_secret = dict(default=None, no_log=True),
|
||||
api_url = dict(default=None),
|
||||
api_http_method = dict(default='get'),
|
||||
),
|
||||
|
@ -432,6 +428,9 @@ def main():
|
|||
except CloudStackException, e:
|
||||
module.fail_json(msg='CloudStackException: %s' % str(e))
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Exception: %s' % str(e))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
# import module snippets
|
||||
|
|
|
@ -23,15 +23,26 @@ DOCUMENTATION = '''
|
|||
module: cs_sshkeypair
|
||||
short_description: Manages SSH keys on Apache CloudStack based clouds.
|
||||
description:
|
||||
- Create, register and remove SSH keys.
|
||||
- If no key was found and no public key was provided, a new SSH
|
||||
private/public key pair will be created and the private key will be returned.
|
||||
version_added: '2.0'
|
||||
author: René Moser
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of public key.
|
||||
required: true
|
||||
domain:
|
||||
description:
|
||||
- Domain the public key is related to.
|
||||
required: false
|
||||
default: null
|
||||
account:
|
||||
description:
|
||||
- Account the public key is related to.
|
||||
required: false
|
||||
default: null
|
||||
project:
|
||||
description:
|
||||
- Name of the project the public key is to be registered in.
|
||||
|
@ -52,7 +63,6 @@ extends_documentation_fragment: cloudstack
|
|||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
# create a new private / public key pair:
|
||||
- local_action: cs_sshkeypair name=linus@example.com
|
||||
register: key
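
# Hypothetical example (not part of the original module documentation):
# register an existing public key instead of generating a new key pair.
- local_action:
    module: cs_sshkeypair
    name: linus@example.com
    public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"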
|
||||
|
@ -103,18 +113,16 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack):
|
|||
|
||||
def __init__(self, module):
|
||||
AnsibleCloudStack.__init__(self, module)
|
||||
self.result = {
|
||||
'changed': False,
|
||||
}
|
||||
self.ssh_key = None
|
||||
|
||||
|
||||
def register_ssh_key(self, public_key):
|
||||
ssh_key = self.get_ssh_key()
|
||||
|
||||
args = {}
|
||||
args['projectid'] = self.get_project_id()
|
||||
args['name'] = self.module.params.get('name')
|
||||
args = {}
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['account'] = self.get_account('name')
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['name'] = self.module.params.get('name')
|
||||
|
||||
res = None
|
||||
if not ssh_key:
|
||||
|
@ -142,9 +150,11 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack):
|
|||
ssh_key = self.get_ssh_key()
|
||||
if not ssh_key:
|
||||
self.result['changed'] = True
|
||||
args = {}
|
||||
args['projectid'] = self.get_project_id()
|
||||
args['name'] = self.module.params.get('name')
|
||||
args = {}
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['account'] = self.get_account('name')
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['name'] = self.module.params.get('name')
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.createSSHKeyPair(**args)
|
||||
ssh_key = res['keypair']
|
||||
|
@ -155,9 +165,11 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack):
|
|||
ssh_key = self.get_ssh_key()
|
||||
if ssh_key:
|
||||
self.result['changed'] = True
|
||||
args = {}
|
||||
args['name'] = self.module.params.get('name')
|
||||
args['projectid'] = self.get_project_id()
|
||||
args = {}
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['account'] = self.get_account('name')
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['name'] = self.module.params.get('name')
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.deleteSSHKeyPair(**args)
|
||||
return ssh_key
|
||||
|
@ -165,9 +177,11 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack):
|
|||
|
||||
def get_ssh_key(self):
|
||||
if not self.ssh_key:
|
||||
args = {}
|
||||
args['projectid'] = self.get_project_id()
|
||||
args['name'] = self.module.params.get('name')
|
||||
args = {}
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['account'] = self.get_account('name')
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['name'] = self.module.params.get('name')
|
||||
|
||||
ssh_keys = self.cs.listSSHKeyPairs(**args)
|
||||
if ssh_keys and 'sshkeypair' in ssh_keys:
|
||||
|
@ -179,10 +193,8 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack):
|
|||
if ssh_key:
|
||||
if 'fingerprint' in ssh_key:
|
||||
self.result['fingerprint'] = ssh_key['fingerprint']
|
||||
|
||||
if 'name' in ssh_key:
|
||||
self.result['name'] = ssh_key['name']
|
||||
|
||||
if 'privatekey' in ssh_key:
|
||||
self.result['private_key'] = ssh_key['privatekey']
|
||||
return self.result
|
||||
|
@ -196,12 +208,14 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack):
|
|||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
name = dict(required=True, default=None),
|
||||
name = dict(required=True),
|
||||
public_key = dict(default=None),
|
||||
domain = dict(default=None),
|
||||
account = dict(default=None),
|
||||
project = dict(default=None),
|
||||
state = dict(choices=['present', 'absent'], default='present'),
|
||||
api_key = dict(default=None),
|
||||
api_secret = dict(default=None),
|
||||
api_secret = dict(default=None, no_log=True),
|
||||
api_url = dict(default=None),
|
||||
api_http_method = dict(default='get'),
|
||||
),
|
||||
|
@ -231,6 +245,9 @@ def main():
|
|||
except CloudStackException, e:
|
||||
module.fail_json(msg='CloudStackException: %s' % str(e))
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Exception: %s' % str(e))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
# import module snippets
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: Manages VM snapshots on Apache CloudStack based clouds.
|
|||
description:
|
||||
- Create, remove and revert VM from snapshots.
|
||||
version_added: '2.0'
|
||||
author: René Moser
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
@ -62,6 +62,16 @@ options:
|
|||
required: false
|
||||
default: 'present'
|
||||
choices: [ 'present', 'absent', 'revert' ]
|
||||
domain:
|
||||
description:
|
||||
- Domain the VM snapshot is related to.
|
||||
required: false
|
||||
default: null
|
||||
account:
|
||||
description:
|
||||
- Account the VM snapshot is related to.
|
||||
required: false
|
||||
default: null
|
||||
poll_async:
|
||||
description:
|
||||
- Poll async jobs until job has finished.
|
||||
|
@ -71,7 +81,6 @@ extends_documentation_fragment: cloudstack
|
|||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
# Create a VM snapshot of disk and memory before an upgrade
|
||||
- local_action:
|
||||
module: cs_vmsnapshot
|
||||
|
@ -134,6 +143,21 @@ description:
|
|||
returned: success
|
||||
type: string
|
||||
sample: snapshot brought to you by Ansible
|
||||
domain:
|
||||
description: Domain the VM snapshot is related to.
|
||||
returned: success
|
||||
type: string
|
||||
sample: example domain
|
||||
account:
|
||||
description: Account the vm snapshot is related to.
|
||||
returned: success
|
||||
type: string
|
||||
sample: example account
|
||||
project:
|
||||
description: Name of project the vm snapshot is related to.
|
||||
returned: success
|
||||
type: string
|
||||
sample: Production
|
||||
'''
|
||||
|
||||
try:
|
||||
|
@ -150,16 +174,15 @@ class AnsibleCloudStackVmSnapshot(AnsibleCloudStack):
|
|||
|
||||
def __init__(self, module):
|
||||
AnsibleCloudStack.__init__(self, module)
|
||||
self.result = {
|
||||
'changed': False,
|
||||
}
|
||||
|
||||
|
||||
def get_snapshot(self):
|
||||
args = {}
|
||||
args['virtualmachineid'] = self.get_vm_id()
|
||||
args['projectid'] = self.get_project_id()
|
||||
args['name'] = self.module.params.get('name')
|
||||
args = {}
|
||||
args['virtualmachineid'] = self.get_vm('id')
|
||||
args['account'] = self.get_account('name')
|
||||
args['domainid'] = self.get_domain('id')
|
||||
args['projectid'] = self.get_project('id')
|
||||
args['name'] = self.module.params.get('name')
|
||||
|
||||
snapshots = self.cs.listVMSnapshot(**args)
|
||||
if snapshots:
|
||||
|
@ -172,11 +195,11 @@ class AnsibleCloudStackVmSnapshot(AnsibleCloudStack):
|
|||
if not snapshot:
|
||||
self.result['changed'] = True
|
||||
|
||||
args = {}
|
||||
args['virtualmachineid'] = self.get_vm_id()
|
||||
args['name'] = self.module.params.get('name')
|
||||
args['description'] = self.module.params.get('description')
|
||||
args['snapshotmemory'] = self.module.params.get('snapshot_memory')
|
||||
args = {}
|
||||
args['virtualmachineid'] = self.get_vm('id')
|
||||
args['name'] = self.module.params.get('name')
|
||||
args['description'] = self.module.params.get('description')
|
||||
args['snapshotmemory'] = self.module.params.get('snapshot_memory')
|
||||
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.createVMSnapshot(**args)
|
||||
|
@ -242,6 +265,12 @@ class AnsibleCloudStackVmSnapshot(AnsibleCloudStack):
|
|||
self.result['name'] = snapshot['name']
|
||||
if 'description' in snapshot:
|
||||
self.result['description'] = snapshot['description']
|
||||
if 'domain' in snapshot:
|
||||
self.result['domain'] = snapshot['domain']
|
||||
if 'account' in snapshot:
|
||||
self.result['account'] = snapshot['account']
|
||||
if 'project' in snapshot:
|
||||
self.result['project'] = snapshot['project']
|
||||
return self.result
|
||||
|
||||
|
||||
|
@ -251,13 +280,15 @@ def main():
|
|||
name = dict(required=True, aliases=['displayname']),
|
||||
vm = dict(required=True),
|
||||
description = dict(default=None),
|
||||
project = dict(default=None),
|
||||
zone = dict(default=None),
|
||||
snapshot_memory = dict(choices=BOOLEANS, default=False),
|
||||
state = dict(choices=['present', 'absent', 'revert'], default='present'),
|
||||
domain = dict(default=None),
|
||||
account = dict(default=None),
|
||||
project = dict(default=None),
|
||||
poll_async = dict(choices=BOOLEANS, default=True),
|
||||
api_key = dict(default=None),
|
||||
api_secret = dict(default=None),
|
||||
api_secret = dict(default=None, no_log=True),
|
||||
api_url = dict(default=None),
|
||||
api_http_method = dict(default='get'),
|
||||
),
|
||||
|
@ -283,6 +314,9 @@ def main():
|
|||
except CloudStackException, e:
|
||||
module.fail_json(msg='CloudStackException: %s' % str(e))
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Exception: %s' % str(e))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
# import module snippets
|
||||
|
|
|
@ -78,8 +78,10 @@ options:
|
|||
default: null
|
||||
aliases: []
|
||||
|
||||
requirements: [ "libcloud" ]
|
||||
author: Peter Tan <ptan@google.com>
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- "apache-libcloud"
|
||||
author: '"Peter Tan (@tanpeter)" <ptan@google.com>'
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
|
|
@ -26,7 +26,7 @@ short_description: Manage LXC Containers
|
|||
version_added: 1.8.0
|
||||
description:
|
||||
- Management of LXC containers
|
||||
author: Kevin Carter
|
||||
author: '"Kevin Carter (@cloudnull)" <kevin.carter@rackspace.com>'
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
@ -38,6 +38,7 @@ options:
|
|||
- lvm
|
||||
- loop
|
||||
- btrfs
|
||||
- overlayfs
|
||||
description:
|
||||
- Backend storage type for the container.
|
||||
required: false
|
||||
|
@ -112,6 +113,24 @@ options:
|
|||
- Set the log level for a container where *container_log* was set.
|
||||
required: false
|
||||
default: INFO
|
||||
clone_name:
|
||||
version_added: "2.0"
|
||||
description:
|
||||
- Name of the new cloned container. This is only used when state is
|
||||
clone.
|
||||
required: false
|
||||
default: false
|
||||
clone_snapshot:
|
||||
version_added: "2.0"
|
||||
required: false
|
||||
choices:
|
||||
- true
|
||||
- false
|
||||
description:
|
||||
- Create a snapshot of the container when cloning. This is not supported
|
||||
by all container storage backends. Enabling this may fail if the
|
||||
backing store does not support snapshots.
|
||||
default: false
|
||||
archive:
|
||||
choices:
|
||||
- true
|
||||
|
@ -142,14 +161,21 @@ options:
|
|||
- absent
|
||||
- frozen
|
||||
description:
|
||||
- Start a container right after it's created.
|
||||
- Define the state of a container. If you clone a container using
|
||||
`clone_name`, the newly cloned container will be created in a stopped state.
|
||||
The running container will be stopped while the clone operation is
|
||||
happening and upon completion of the clone the original container
|
||||
state will be restored.
|
||||
required: false
|
||||
default: started
|
||||
container_config:
|
||||
description:
|
||||
- list of 'key=value' options to use when configuring a container.
|
||||
required: false
|
||||
requirements: ['lxc >= 1.0', 'python2-lxc >= 0.1']
|
||||
requirements:
|
||||
- 'lxc >= 1.0'
|
||||
- 'python >= 2.6'
|
||||
- 'python2-lxc >= 0.1'
|
||||
notes:
|
||||
- Containers must have a unique name. If you attempt to create a container
|
||||
with a name that already exists in the users namespace the module will
|
||||
|
@ -203,6 +229,7 @@ EXAMPLES = """
|
|||
- name: Create filesystem container
|
||||
lxc_container:
|
||||
name: test-container-config
|
||||
backing_store: dir
|
||||
container_log: true
|
||||
template: ubuntu
|
||||
state: started
|
||||
|
@ -216,7 +243,7 @@ EXAMPLES = """
|
|||
# Create an lvm container, run a complex command in it, add additional
|
||||
# configuration to it, create an archive of it, and finally leave the container
|
||||
# in a frozen state. The container archive will be compressed using bzip2
|
||||
- name: Create an lvm container
|
||||
- name: Create a frozen lvm container
|
||||
lxc_container:
|
||||
name: test-container-lvm
|
||||
container_log: true
|
||||
|
@ -241,14 +268,6 @@ EXAMPLES = """
|
|||
- name: Debug info on container "test-container-lvm"
|
||||
debug: var=lvm_container_info
|
||||
|
||||
- name: Get information on a given container.
|
||||
lxc_container:
|
||||
name: test-container-config
|
||||
register: config_container_info
|
||||
|
||||
- name: debug info on container "test-container"
|
||||
debug: var=config_container_info
|
||||
|
||||
- name: Run a command in a container and ensure its in a "stopped" state.
|
||||
lxc_container:
|
||||
name: test-container-started
|
||||
|
@ -263,19 +282,19 @@ EXAMPLES = """
|
|||
container_command: |
|
||||
echo 'hello world.' | tee /opt/frozen
|
||||
|
||||
- name: Start a container.
|
||||
- name: Start a container
|
||||
lxc_container:
|
||||
name: test-container-stopped
|
||||
state: started
|
||||
|
||||
- name: Run a command in a container and then restart it.
|
||||
- name: Run a command in a container and then restart it
|
||||
lxc_container:
|
||||
name: test-container-started
|
||||
state: restarted
|
||||
container_command: |
|
||||
echo 'hello world.' | tee /opt/restarted
|
||||
|
||||
- name: Run a complex command within a "running" container.
|
||||
- name: Run a complex command within a "running" container
|
||||
lxc_container:
|
||||
name: test-container-started
|
||||
container_command: |
|
||||
|
@ -295,7 +314,53 @@ EXAMPLES = """
|
|||
archive: true
|
||||
archive_path: /opt/archives
|
||||
|
||||
- name: Destroy a container.
|
||||
# Create a container using overlayfs, create an archive of it, create a
|
||||
# snapshot clone of the container and finally leave the container
|
||||
# in a frozen state. The container archive will be compressed using gzip.
|
||||
- name: Create an overlayfs container archive and clone it
|
||||
lxc_container:
|
||||
name: test-container-overlayfs
|
||||
container_log: true
|
||||
template: ubuntu
|
||||
state: started
|
||||
backing_store: overlayfs
|
||||
template_options: --release trusty
|
||||
clone_snapshot: true
|
||||
clone_name: test-container-overlayfs-clone-snapshot
|
||||
archive: true
|
||||
archive_compression: gzip
|
||||
register: clone_container_info
|
||||
|
||||
- name: debug info on container "test-container"
|
||||
debug: var=clone_container_info
|
||||
|
||||
- name: Clone a container using snapshot
|
||||
lxc_container:
|
||||
name: test-container-overlayfs-clone-snapshot
|
||||
backing_store: overlayfs
|
||||
clone_name: test-container-overlayfs-clone-snapshot2
|
||||
clone_snapshot: true
|
||||
|
||||
- name: Create a new container and clone it
|
||||
lxc_container:
|
||||
name: test-container-new-archive
|
||||
backing_store: dir
|
||||
clone_name: test-container-new-archive-clone
|
||||
|
||||
- name: Archive and clone a container then destroy it
|
||||
lxc_container:
|
||||
name: test-container-new-archive
|
||||
state: absent
|
||||
clone_name: test-container-new-archive-destroyed-clone
|
||||
archive: true
|
||||
archive_compression: gzip
|
||||
|
||||
- name: Start a cloned container.
|
||||
lxc_container:
|
||||
name: test-container-new-archive-destroyed-clone
|
||||
state: started
|
||||
|
||||
- name: Destroy a container
|
||||
lxc_container:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
|
@ -305,6 +370,13 @@ EXAMPLES = """
|
|||
- test-container-frozen
|
||||
- test-container-lvm
|
||||
- test-container-config
|
||||
- test-container-overlayfs
|
||||
- test-container-overlayfs-clone
|
||||
- test-container-overlayfs-clone-snapshot
|
||||
- test-container-overlayfs-clone-snapshot2
|
||||
- test-container-new-archive
|
||||
- test-container-new-archive-clone
|
||||
- test-container-new-archive-destroyed-clone
|
||||
"""
|
||||
|
||||
|
||||
|
@ -351,6 +423,15 @@ LXC_COMMAND_MAP = {
|
|||
'directory': '--dir',
|
||||
'zfs_root': '--zfsroot'
|
||||
}
|
||||
},
|
||||
'clone': {
|
||||
'variables': {
|
||||
'backing_store': '--backingstore',
|
||||
'lxc_path': '--lxcpath',
|
||||
'fs_size': '--fssize',
|
||||
'name': '--orig',
|
||||
'clone_name': '--new'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -369,6 +450,9 @@ LXC_BACKING_STORE = {
|
|||
],
|
||||
'loop': [
|
||||
'lv_name', 'vg_name', 'thinpool', 'zfs_root'
|
||||
],
|
||||
'overlayfs': [
|
||||
'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root'
|
||||
]
|
||||
}
|
||||
|
||||
|
@ -388,7 +472,8 @@ LXC_ANSIBLE_STATES = {
|
|||
'stopped': '_stopped',
|
||||
'restarted': '_restarted',
|
||||
'absent': '_destroyed',
|
||||
'frozen': '_frozen'
|
||||
'frozen': '_frozen',
|
||||
'clone': '_clone'
|
||||
}
|
||||
|
||||
|
||||
|
@ -439,18 +524,16 @@ def create_script(command):
|
|||
f.close()
|
||||
|
||||
# Ensure the script is executable.
|
||||
os.chmod(script_file, 0755)
|
||||
os.chmod(script_file, 1755)
|
||||
|
||||
# Get temporary directory.
|
||||
tempdir = tempfile.gettempdir()
|
||||
|
||||
# Output log file.
|
||||
stdout = path.join(tempdir, 'lxc-attach-script.log')
|
||||
stdout_file = open(stdout, 'ab')
|
||||
stdout_file = open(path.join(tempdir, 'lxc-attach-script.log'), 'ab')
|
||||
|
||||
# Error log file.
|
||||
stderr = path.join(tempdir, 'lxc-attach-script.err')
|
||||
stderr_file = open(stderr, 'ab')
|
||||
stderr_file = open(path.join(tempdir, 'lxc-attach-script.err'), 'ab')
|
||||
|
||||
# Execute the script command.
|
||||
try:
|
||||
|
@ -482,6 +565,7 @@ class LxcContainerManagement(object):
|
|||
self.container_name = self.module.params['name']
|
||||
self.container = self.get_container_bind()
|
||||
self.archive_info = None
|
||||
self.clone_info = None
|
||||
|
||||
def get_container_bind(self):
|
||||
return lxc.Container(name=self.container_name)
|
||||
|
@ -502,15 +586,15 @@ class LxcContainerManagement(object):
|
|||
return num
|
||||
|
||||
@staticmethod
|
||||
def _container_exists(name):
|
||||
def _container_exists(container_name):
|
||||
"""Check if a container exists.
|
||||
|
||||
:param name: Name of the container.
|
||||
:param container_name: Name of the container.
|
||||
:type: ``str``
|
||||
:returns: True or False if the container is found.
|
||||
:rtype: ``bool``
|
||||
"""
|
||||
if [i for i in lxc.list_containers() if i == name]:
|
||||
if [i for i in lxc.list_containers() if i == container_name]:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
@ -543,6 +627,7 @@ class LxcContainerManagement(object):
|
|||
"""
|
||||
|
||||
# Remove incompatible storage backend options.
|
||||
variables = variables.copy()
|
||||
for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
|
||||
variables.pop(v, None)
|
||||
|
||||
|
@ -655,6 +740,68 @@ class LxcContainerManagement(object):
|
|||
self._container_startup()
|
||||
self.container.freeze()
|
||||
|
||||
def _container_create_clone(self):
|
||||
"""Clone a new LXC container from an existing container.
|
||||
|
||||
This method will clone an existing container to a new container using
|
||||
the `clone_name` variable as the new container name. The method will
|
||||
create a container if the container `name` does not exist.
|
||||
|
||||
Note that cloning a container will ensure that the original container
|
||||
is "stopped" before the clone can be done. Because this operation can
|
||||
require a state change the method will return the original container
|
||||
to its prior state upon completion of the clone.
|
||||
|
||||
Once the clone is complete the new container will be left in a stopped
|
||||
state.
|
||||
"""
|
||||
|
||||
# Ensure that the state of the original container is stopped
|
||||
container_state = self._get_state()
|
||||
if container_state != 'stopped':
|
||||
self.state_change = True
|
||||
self.container.stop()
|
||||
|
||||
build_command = [
|
||||
self.module.get_bin_path('lxc-clone', True),
|
||||
]
|
||||
|
||||
build_command = self._add_variables(
|
||||
variables_dict=self._get_vars(
|
||||
variables=LXC_COMMAND_MAP['clone']['variables']
|
||||
),
|
||||
build_command=build_command
|
||||
)
|
||||
|
||||
# Load logging for the instance when creating it.
|
||||
if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE:
|
||||
build_command.append('--snapshot')
|
||||
# Check for backing_store == overlayfs if so force the use of snapshot
|
||||
# If overlay fs is used and snapshot is unset the clone command will
|
||||
# fail with an unsupported type.
|
||||
elif self.module.params.get('backing_store') == 'overlayfs':
|
||||
build_command.append('--snapshot')
|
||||
|
||||
rc, return_data, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
message = "Failed executing lxc-clone."
|
||||
self.failure(
|
||||
err=err, rc=rc, msg=message, command=' '.join(
|
||||
build_command
|
||||
)
|
||||
)
|
||||
else:
|
||||
self.state_change = True
|
||||
# Restore the original state of the origin container if it was
|
||||
# not in a stopped state.
|
||||
if container_state == 'running':
|
||||
self.container.start()
|
||||
elif container_state == 'frozen':
|
||||
self.container.start()
|
||||
self.container.freeze()
|
||||
|
||||
return True
|
||||
|
||||
def _create(self):
|
||||
"""Create a new LXC container.
|
||||
|
||||
|
@ -709,9 +856,9 @@ class LxcContainerManagement(object):
|
|||
|
||||
rc, return_data, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
msg = "Failed executing lxc-create."
|
||||
message = "Failed executing lxc-create."
|
||||
self.failure(
|
||||
err=err, rc=rc, msg=msg, command=' '.join(build_command)
|
||||
err=err, rc=rc, msg=message, command=' '.join(build_command)
|
||||
)
|
||||
else:
|
||||
self.state_change = True
|
||||
|
@ -751,7 +898,7 @@ class LxcContainerManagement(object):
|
|||
:rtype: ``str``
|
||||
"""
|
||||
|
||||
if self._container_exists(name=self.container_name):
|
||||
if self._container_exists(container_name=self.container_name):
|
||||
return str(self.container.state).lower()
|
||||
else:
|
||||
return str('absent')
|
||||
|
@ -794,7 +941,7 @@ class LxcContainerManagement(object):
|
|||
rc=1,
|
||||
msg='The container [ %s ] failed to start. Check to lxc is'
|
||||
' available and that the container is in a functional'
|
||||
' state.'
|
||||
' state.' % self.container_name
|
||||
)
|
||||
|
||||
def _check_archive(self):
|
||||
|
@ -808,6 +955,23 @@ class LxcContainerManagement(object):
|
|||
'archive': self._container_create_tar()
|
||||
}
|
||||
|
||||
def _check_clone(self):
|
||||
"""Create a compressed archive of a container.
|
||||
|
||||
This will store archive_info in as self.archive_info
|
||||
"""
|
||||
|
||||
clone_name = self.module.params.get('clone_name')
|
||||
if clone_name:
|
||||
if not self._container_exists(container_name=clone_name):
|
||||
self.clone_info = {
|
||||
'cloned': self._container_create_clone()
|
||||
}
|
||||
else:
|
||||
self.clone_info = {
|
||||
'cloned': False
|
||||
}
|
||||
|
||||
def _destroyed(self, timeout=60):
|
||||
"""Ensure a container is destroyed.
|
||||
|
||||
|
@ -816,12 +980,15 @@ class LxcContainerManagement(object):
|
|||
"""
|
||||
|
||||
for _ in xrange(timeout):
|
||||
if not self._container_exists(name=self.container_name):
|
||||
if not self._container_exists(container_name=self.container_name):
|
||||
break
|
||||
|
||||
# Check if the container needs to have an archive created.
|
||||
self._check_archive()
|
||||
|
||||
# Check if the container is to be cloned
|
||||
self._check_clone()
|
||||
|
||||
if self._get_state() != 'stopped':
|
||||
self.state_change = True
|
||||
self.container.stop()
|
||||
|
@ -852,7 +1019,7 @@ class LxcContainerManagement(object):
|
|||
"""
|
||||
|
||||
self.check_count(count=count, method='frozen')
|
||||
if self._container_exists(name=self.container_name):
|
||||
if self._container_exists(container_name=self.container_name):
|
||||
self._execute_command()
|
||||
|
||||
# Perform any configuration updates
|
||||
|
@ -871,6 +1038,9 @@ class LxcContainerManagement(object):
|
|||
|
||||
# Check if the container needs to have an archive created.
|
||||
self._check_archive()
|
||||
|
||||
# Check if the container is to be cloned
|
||||
self._check_clone()
|
||||
else:
|
||||
self._create()
|
||||
count += 1
|
||||
|
@ -886,7 +1056,7 @@ class LxcContainerManagement(object):
|
|||
"""
|
||||
|
||||
self.check_count(count=count, method='restart')
|
||||
if self._container_exists(name=self.container_name):
|
||||
if self._container_exists(container_name=self.container_name):
|
||||
self._execute_command()
|
||||
|
||||
# Perform any configuration updates
|
||||
|
@ -898,6 +1068,9 @@ class LxcContainerManagement(object):
|
|||
|
||||
# Check if the container needs to have an archive created.
|
||||
self._check_archive()
|
||||
|
||||
# Check if the container is to be cloned
|
||||
self._check_clone()
|
||||
else:
|
||||
self._create()
|
||||
count += 1
|
||||
|
@ -913,7 +1086,7 @@ class LxcContainerManagement(object):
|
|||
"""
|
||||
|
||||
self.check_count(count=count, method='stop')
|
||||
if self._container_exists(name=self.container_name):
|
||||
if self._container_exists(container_name=self.container_name):
|
||||
self._execute_command()
|
||||
|
||||
# Perform any configuration updates
|
||||
|
@ -925,6 +1098,9 @@ class LxcContainerManagement(object):
|
|||
|
||||
# Check if the container needs to have an archive created.
|
||||
self._check_archive()
|
||||
|
||||
# Check if the container is to be cloned
|
||||
self._check_clone()
|
||||
else:
|
||||
self._create()
|
||||
count += 1
|
||||
|
@ -940,7 +1116,7 @@ class LxcContainerManagement(object):
|
|||
"""
|
||||
|
||||
self.check_count(count=count, method='start')
|
||||
if self._container_exists(name=self.container_name):
|
||||
if self._container_exists(container_name=self.container_name):
|
||||
container_state = self._get_state()
|
||||
if container_state == 'running':
|
||||
pass
|
||||
|
@ -965,6 +1141,9 @@ class LxcContainerManagement(object):
|
|||
|
||||
# Check if the container needs to have an archive created.
|
||||
self._check_archive()
|
||||
|
||||
# Check if the container is to be cloned
|
||||
self._check_clone()
|
||||
else:
|
||||
self._create()
|
||||
count += 1
|
||||
|
@ -1007,18 +1186,18 @@ class LxcContainerManagement(object):
|
|||
all_lvms = [i.split() for i in stdout.splitlines()][1:]
|
||||
return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg]
|
||||
|
||||
def _get_vg_free_pe(self, name):
|
||||
def _get_vg_free_pe(self, vg_name):
|
||||
"""Return the available size of a given VG.
|
||||
|
||||
:param name: Name of volume.
|
||||
:type name: ``str``
|
||||
:param vg_name: Name of volume.
|
||||
:type vg_name: ``str``
|
||||
:returns: size and measurement of an LV
|
||||
:type: ``tuple``
|
||||
"""
|
||||
|
||||
build_command = [
|
||||
'vgdisplay',
|
||||
name,
|
||||
vg_name,
|
||||
'--units',
|
||||
'g'
|
||||
]
|
||||
|
@ -1027,7 +1206,7 @@ class LxcContainerManagement(object):
|
|||
self.failure(
|
||||
err=err,
|
||||
rc=rc,
|
||||
msg='failed to read vg %s' % name,
|
||||
msg='failed to read vg %s' % vg_name,
|
||||
command=' '.join(build_command)
|
||||
)
|
||||
|
||||
|
@ -1036,17 +1215,17 @@ class LxcContainerManagement(object):
|
|||
_free_pe = free_pe[0].split()
|
||||
return float(_free_pe[-2]), _free_pe[-1]
|
||||
|
||||
def _get_lv_size(self, name):
|
||||
def _get_lv_size(self, lv_name):
|
||||
"""Return the available size of a given LV.
|
||||
|
||||
:param name: Name of volume.
|
||||
:type name: ``str``
|
||||
:param lv_name: Name of volume.
|
||||
:type lv_name: ``str``
|
||||
:returns: size and measurement of an LV
|
||||
:type: ``tuple``
|
||||
"""
|
||||
|
||||
vg = self._get_lxc_vg()
|
||||
lv = os.path.join(vg, name)
|
||||
lv = os.path.join(vg, lv_name)
|
||||
build_command = [
|
||||
'lvdisplay',
|
||||
lv,
|
||||
|
@ -1080,7 +1259,7 @@ class LxcContainerManagement(object):
|
|||
"""
|
||||
|
||||
vg = self._get_lxc_vg()
|
||||
free_space, messurement = self._get_vg_free_pe(name=vg)
|
||||
free_space, messurement = self._get_vg_free_pe(vg_name=vg)
|
||||
|
||||
if free_space < float(snapshot_size_gb):
|
||||
message = (
|
||||
|
@ -1183,25 +1362,25 @@ class LxcContainerManagement(object):
|
|||
|
||||
return archive_name
|
||||
|
||||
def _lvm_lv_remove(self, name):
|
||||
def _lvm_lv_remove(self, lv_name):
|
||||
"""Remove an LV.
|
||||
|
||||
:param name: The name of the logical volume
|
||||
:type name: ``str``
|
||||
:param lv_name: The name of the logical volume
|
||||
:type lv_name: ``str``
|
||||
"""
|
||||
|
||||
vg = self._get_lxc_vg()
|
||||
build_command = [
|
||||
self.module.get_bin_path('lvremove', True),
|
||||
"-f",
|
||||
"%s/%s" % (vg, name),
|
||||
"%s/%s" % (vg, lv_name),
|
||||
]
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
rc=rc,
|
||||
msg='Failed to remove LVM LV %s/%s' % (vg, name),
|
||||
msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
|
||||
command=' '.join(build_command)
|
||||
)
|
||||
|
||||
|
@ -1213,21 +1392,34 @@ class LxcContainerManagement(object):
|
|||
:param temp_dir: path to the temporary local working directory
|
||||
:type temp_dir: ``str``
|
||||
"""
|
||||
# This loop is created to support overlayfs archives. This should
|
||||
# squash all of the layers into a single archive.
|
||||
fs_paths = container_path.split(':')
|
||||
if 'overlayfs' in fs_paths:
|
||||
fs_paths.pop(fs_paths.index('overlayfs'))
|
||||
|
||||
build_command = [
|
||||
self.module.get_bin_path('rsync', True),
|
||||
'-aHAX',
|
||||
container_path,
|
||||
temp_dir
|
||||
]
|
||||
rc, stdout, err = self._run_command(build_command, unsafe_shell=True)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
rc=rc,
|
||||
msg='failed to perform archive',
|
||||
command=' '.join(build_command)
|
||||
for fs_path in fs_paths:
|
||||
# Set the path to the container data
|
||||
fs_path = os.path.dirname(fs_path)
|
||||
|
||||
# Run the sync command
|
||||
build_command = [
|
||||
self.module.get_bin_path('rsync', True),
|
||||
'-aHAX',
|
||||
fs_path,
|
||||
temp_dir
|
||||
]
|
||||
rc, stdout, err = self._run_command(
|
||||
build_command,
|
||||
unsafe_shell=True
|
||||
)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
rc=rc,
|
||||
msg='failed to perform archive',
|
||||
command=' '.join(build_command)
|
||||
)
|
||||
|
||||
def _unmount(self, mount_point):
|
||||
"""Unmount a file system.
|
||||
|
@ -1249,6 +1441,33 @@ class LxcContainerManagement(object):
|
|||
command=' '.join(build_command)
|
||||
)
|
||||
|
||||
def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
|
||||
"""mount an lv.
|
||||
|
||||
:param lowerdir: name/path of the lower directory
|
||||
:type lowerdir: ``str``
|
||||
:param upperdir: name/path of the upper directory
|
||||
:type upperdir: ``str``
|
||||
:param mount_point: path on the file system that is mounted.
|
||||
:type mount_point: ``str``
|
||||
"""
|
||||
|
||||
build_command = [
|
||||
self.module.get_bin_path('mount', True),
|
||||
'-t overlayfs',
|
||||
'-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
|
||||
'overlayfs',
|
||||
mount_point,
|
||||
]
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
rc=rc,
|
||||
msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
|
||||
% (lowerdir, upperdir, mount_point, build_command)
|
||||
)
|
||||
|
||||
def _container_create_tar(self):
|
||||
"""Create a tar archive from an LXC container.
|
||||
|
||||
|
@ -1275,13 +1494,15 @@ class LxcContainerManagement(object):
|
|||
|
||||
# Test if the containers rootfs is a block device
|
||||
block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev'))
|
||||
|
||||
# Test if the container is using overlayfs
|
||||
overlayfs_backed = lxc_rootfs.startswith('overlayfs')
|
||||
|
||||
mount_point = os.path.join(work_dir, 'rootfs')
|
||||
|
||||
# Set the snapshot name if needed
|
||||
snapshot_name = '%s_lxc_snapshot' % self.container_name
|
||||
|
||||
# Set the path to the container data
|
||||
container_path = os.path.dirname(lxc_rootfs)
|
||||
container_state = self._get_state()
|
||||
try:
|
||||
# Ensure the original container is stopped or frozen
|
||||
|
@ -1292,7 +1513,7 @@ class LxcContainerManagement(object):
|
|||
self.container.stop()
|
||||
|
||||
# Sync the container data from the container_path to work_dir
|
||||
self._rsync_data(container_path, temp_dir)
|
||||
self._rsync_data(lxc_rootfs, temp_dir)
|
||||
|
||||
if block_backed:
|
||||
if snapshot_name not in self._lvm_lv_list():
|
||||
|
@ -1301,7 +1522,7 @@ class LxcContainerManagement(object):
|
|||
|
||||
# Take snapshot
|
||||
size, measurement = self._get_lv_size(
|
||||
name=self.container_name
|
||||
lv_name=self.container_name
|
||||
)
|
||||
self._lvm_snapshot_create(
|
||||
source_lv=self.container_name,
|
||||
|
@ -1322,6 +1543,25 @@ class LxcContainerManagement(object):
|
|||
' up old snapshot of containers before continuing.'
|
||||
% snapshot_name
|
||||
)
|
||||
elif overlayfs_backed:
|
||||
lowerdir, upperdir = lxc_rootfs.split(':')[1:]
|
||||
self._overlayfs_mount(
|
||||
lowerdir=lowerdir,
|
||||
upperdir=upperdir,
|
||||
mount_point=mount_point
|
||||
)
|
||||
|
||||
# Set the state as changed and set a new fact
|
||||
self.state_change = True
|
||||
return self._create_tar(source_dir=work_dir)
|
||||
finally:
|
||||
if block_backed or overlayfs_backed:
|
||||
# unmount snapshot
|
||||
self._unmount(mount_point)
|
||||
|
||||
if block_backed:
|
||||
# Remove snapshot
|
||||
self._lvm_lv_remove(snapshot_name)
|
||||
|
||||
# Restore original state of container
|
||||
if container_state == 'running':
|
||||
|
@ -1330,17 +1570,6 @@ class LxcContainerManagement(object):
|
|||
else:
|
||||
self.container.start()
|
||||
|
||||
# Set the state as changed and set a new fact
|
||||
self.state_change = True
|
||||
return self._create_tar(source_dir=work_dir)
|
||||
finally:
|
||||
if block_backed:
|
||||
# unmount snapshot
|
||||
self._unmount(mount_point)
|
||||
|
||||
# Remove snapshot
|
||||
self._lvm_lv_remove(snapshot_name)
|
||||
|
||||
# Remove tmpdir
|
||||
shutil.rmtree(temp_dir)
|
||||
|
||||
|
@ -1374,6 +1603,9 @@ class LxcContainerManagement(object):
|
|||
if self.archive_info:
|
||||
outcome.update(self.archive_info)
|
||||
|
||||
if self.clone_info:
|
||||
outcome.update(self.clone_info)
|
||||
|
||||
self.module.exit_json(
|
||||
changed=self.state_change,
|
||||
lxc_container=outcome
|
||||
|
@ -1450,6 +1682,14 @@ def main():
|
|||
choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i],
|
||||
default='INFO'
|
||||
),
|
||||
clone_name=dict(
|
||||
type='str',
|
||||
required=False
|
||||
),
|
||||
clone_snapshot=dict(
|
||||
choices=BOOLEANS,
|
||||
default='false'
|
||||
),
|
||||
archive=dict(
|
||||
choices=BOOLEANS,
|
||||
default='false'
|
||||
|
@ -1477,4 +1717,3 @@ def main():
|
|||
# import module bits
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
||||
|
||||
|
|
6
cloud/misc/ovirt.py
Executable file → Normal file
|
@ -20,7 +20,7 @@
|
|||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ovirt
|
||||
author: Vincent Van der Kussen
|
||||
author: '"Vincent Van der Kussen (@vincentvdk)" <vincent at vanderkussen.org>'
|
||||
short_description: oVirt/RHEV platform management
|
||||
description:
|
||||
- allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform
|
||||
|
@ -152,7 +152,9 @@ options:
|
|||
aliases: []
|
||||
choices: ['present', 'absent', 'shutdown', 'started', 'restarted']
|
||||
|
||||
requirements: [ "ovirt-engine-sdk" ]
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- "ovirt-engine-sdk-python"
|
||||
'''
|
||||
EXAMPLES = '''
|
||||
# Basic example provisioning from image.
|
||||
|
|
432
cloud/misc/proxmox.py
Normal file
|
@ -0,0 +1,432 @@
|
|||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: proxmox
|
||||
short_description: management of instances in Proxmox VE cluster
|
||||
description:
|
||||
- allows you to create/delete/stop instances in Proxmox VE cluster
|
||||
version_added: "2.0"
|
||||
options:
|
||||
api_host:
|
||||
description:
|
||||
- the host of the Proxmox VE cluster
|
||||
required: true
|
||||
api_user:
|
||||
description:
|
||||
- the user to authenticate with
|
||||
required: true
|
||||
api_password:
|
||||
description:
|
||||
- the password to authenticate with
|
||||
- you can use PROXMOX_PASSWORD environment variable
|
||||
default: null
|
||||
required: false
|
||||
vmid:
|
||||
description:
|
||||
- the instance id
|
||||
default: null
|
||||
required: true
|
||||
https_verify_ssl:
|
||||
description:
|
||||
- enable / disable https certificate verification
|
||||
default: false
|
||||
required: false
|
||||
type: boolean
|
||||
node:
|
||||
description:
|
||||
- Proxmox VE node on which the new VM will be created
|
||||
- required only for C(state=present)
|
||||
- for other states it will be autodiscovered
|
||||
default: null
|
||||
required: false
|
||||
password:
|
||||
description:
|
||||
- the instance root password
|
||||
- required only for C(state=present)
|
||||
default: null
|
||||
required: false
|
||||
hostname:
|
||||
description:
|
||||
- the instance hostname
|
||||
- required only for C(state=present)
|
||||
default: null
|
||||
required: false
|
||||
ostemplate:
|
||||
description:
|
||||
- the template used to create the VM
|
||||
- required only for C(state=present)
|
||||
default: null
|
||||
required: false
|
||||
disk:
|
||||
description:
|
||||
- hard disk size in GB for instance
|
||||
default: 3
|
||||
required: false
|
||||
cpus:
|
||||
description:
|
||||
- numbers of allocated cpus for instance
|
||||
default: 1
|
||||
required: false
|
||||
memory:
|
||||
description:
|
||||
- memory size in MB for instance
|
||||
default: 512
|
||||
required: false
|
||||
swap:
|
||||
description:
|
||||
- swap memory size in MB for instance
|
||||
default: 0
|
||||
required: false
|
||||
netif:
|
||||
description:
|
||||
- specifies network interfaces for the container
|
||||
default: null
|
||||
required: false
|
||||
type: string
|
||||
ip_address:
|
||||
description:
|
||||
- specifies the address the container will be assigned
|
||||
default: null
|
||||
required: false
|
||||
type: string
|
||||
onboot:
|
||||
description:
|
||||
- specifies whether a VM will be started during system bootup
|
||||
default: false
|
||||
required: false
|
||||
type: boolean
|
||||
storage:
|
||||
description:
|
||||
- target storage
|
||||
default: 'local'
|
||||
required: false
|
||||
type: string
|
||||
cpuunits:
|
||||
description:
|
||||
- CPU weight for a VM
|
||||
default: 1000
|
||||
required: false
|
||||
type: integer
|
||||
nameserver:
|
||||
description:
|
||||
- sets DNS server IP address for a container
|
||||
default: null
|
||||
required: false
|
||||
type: string
|
||||
searchdomain:
|
||||
description:
|
||||
- sets DNS search domain for a container
|
||||
default: null
|
||||
required: false
|
||||
type: string
|
||||
timeout:
|
||||
description:
|
||||
- timeout for operations
|
||||
default: 30
|
||||
required: false
|
||||
type: integer
|
||||
force:
|
||||
description:
|
||||
- forcing operations
|
||||
- can be used only with states C(present), C(stopped), C(restarted)
|
||||
- with C(state=present) the force option allows overwriting an existing container
- with C(stopped) and C(restarted) it allows force-stopping the instance
|
||||
default: false
|
||||
required: false
|
||||
type: boolean
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the instance
|
||||
choices: ['present', 'started', 'absent', 'stopped', 'restarted']
|
||||
default: present
|
||||
notes:
|
||||
- Requires the proxmoxer and requests modules on the host. These modules can be installed with pip.
|
||||
requirements: [ "proxmoxer", "requests" ]
|
||||
author: "Sergei Antipov @UnderGreen"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create new container with minimal options
|
||||
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
|
||||
# Create new container with minimal options with force(it will rewrite existing container)
|
||||
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' force=yes
|
||||
|
||||
# Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before)
|
||||
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
|
||||
# Start container
|
||||
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=started
|
||||
|
||||
# Stop container
|
||||
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=stopped
|
||||
|
||||
# Stop container with force
|
||||
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' force=yes state=stopped
|
||||
|
||||
# Restart a container (a stopped or mounted container cannot be restarted)
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=restarted
|
||||
|
||||
# Remove container
|
||||
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=absent
|
||||
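# Hypothetical example: create a container with custom resources
# (cpus, memory, swap, disk and storage are all options documented above)
- proxmox: vmid=101 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' cpus=2 memory=1024 swap=512 disk=8 storage='local'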
'''
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
try:
|
||||
from proxmoxer import ProxmoxAPI
|
||||
HAS_PROXMOXER = True
|
||||
except ImportError:
|
||||
HAS_PROXMOXER = False
|
||||
|
||||
def get_instance(proxmox, vmid):
|
||||
return [ vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid) ]
|
||||
|
||||
def content_check(proxmox, node, ostemplate, storage):
|
||||
return [ True for cnt in proxmox.nodes(node).storage(storage).content.get() if cnt['volid'] == ostemplate ]
|
||||
|
||||
def node_check(proxmox, node):
|
||||
return [ True for nd in proxmox.nodes.get() if nd['node'] == node ]
|
||||
|
||||
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
|
||||
proxmox_node = proxmox.nodes(node)
|
||||
taskid = proxmox_node.openvz.create(vmid=vmid, storage=storage, memory=memory, swap=swap,
|
||||
cpus=cpus, disk=disk, **kwargs)
|
||||
|
||||
while timeout:
|
||||
if ( proxmox_node.tasks(taskid).status.get()['status'] == 'stopped'
|
||||
and proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK' ):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s'
|
||||
% proxmox_node.tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
|
||||
def start_instance(module, proxmox, vm, vmid, timeout):
|
||||
taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.start.post()
|
||||
while timeout:
|
||||
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
|
||||
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s'
|
||||
% proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def stop_instance(module, proxmox, vm, vmid, timeout, force):
|
||||
if force:
|
||||
taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.shutdown.post(forceStop=1)
|
||||
else:
|
||||
taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.shutdown.post()
|
||||
while timeout:
|
||||
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
|
||||
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s'
|
||||
% proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def umount_instance(module, proxmox, vm, vmid, timeout):
|
||||
taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.umount.post()
|
||||
while timeout:
|
||||
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
|
||||
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s'
|
||||
% proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
api_host = dict(required=True),
|
||||
api_user = dict(required=True),
|
||||
api_password = dict(no_log=True),
|
||||
vmid = dict(required=True),
|
||||
https_verify_ssl = dict(type='bool', choices=BOOLEANS, default='no'),
|
||||
node = dict(),
|
||||
password = dict(no_log=True),
|
||||
hostname = dict(),
|
||||
ostemplate = dict(),
|
||||
disk = dict(type='int', default=3),
|
||||
cpus = dict(type='int', default=1),
|
||||
memory = dict(type='int', default=512),
|
||||
swap = dict(type='int', default=0),
|
||||
netif = dict(),
|
||||
ip_address = dict(),
|
||||
onboot = dict(type='bool', choices=BOOLEANS, default='no'),
|
||||
storage = dict(default='local'),
|
||||
cpuunits = dict(type='int', default=1000),
|
||||
nameserver = dict(),
|
||||
searchdomain = dict(),
|
||||
timeout = dict(type='int', default=30),
|
||||
force = dict(type='bool', choices=BOOLEANS, default='no'),
|
||||
state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
|
||||
)
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg='proxmoxer required for this module')
|
||||
|
||||
state = module.params['state']
|
||||
api_user = module.params['api_user']
|
||||
api_host = module.params['api_host']
|
||||
api_password = module.params['api_password']
|
||||
vmid = module.params['vmid']
|
||||
https_verify_ssl = module.params['https_verify_ssl']
|
||||
node = module.params['node']
|
||||
disk = module.params['disk']
|
||||
cpus = module.params['cpus']
|
||||
memory = module.params['memory']
|
||||
swap = module.params['swap']
|
||||
storage = module.params['storage']
|
||||
timeout = module.params['timeout']
|
||||
|
||||
# If password not set get it from PROXMOX_PASSWORD env
|
||||
if not api_password:
|
||||
try:
|
||||
api_password = os.environ['PROXMOX_PASSWORD']
|
||||
except KeyError, e:
|
||||
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
|
||||
|
||||
try:
|
||||
proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=https_verify_ssl)
|
||||
except Exception, e:
|
||||
module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
|
||||
|
||||
if state == 'present':
|
||||
try:
|
||||
if get_instance(proxmox, vmid) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
|
||||
elif not (node and module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
|
||||
module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
|
||||
elif not node_check(proxmox, node):
|
||||
module.fail_json(msg="node '%s' not exists in cluster" % node)
|
||||
elif not content_check(proxmox, node, module.params['ostemplate'], storage):
|
||||
module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
|
||||
% (module.params['ostemplate'], node, storage))
|
||||
|
||||
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
|
||||
password = module.params['password'],
|
||||
hostname = module.params['hostname'],
|
||||
ostemplate = module.params['ostemplate'],
|
||||
netif = module.params['netif'],
|
||||
ip_address = module.params['ip_address'],
|
||||
onboot = int(module.params['onboot']),
|
||||
cpuunits = module.params['cpuunits'],
|
||||
nameserver = module.params['nameserver'],
|
||||
searchdomain = module.params['searchdomain'],
|
||||
force = int(module.params['force']))
|
||||
|
||||
module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
|
||||
except Exception, e:
|
||||
module.fail_json(msg="creation of VM %s failed with exception: %s" % ( vmid, e ))
|
||||
|
||||
elif state == 'started':
|
||||
try:
|
||||
vm = get_instance(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'running':
|
||||
module.exit_json(changed=False, msg="VM %s is already running" % vmid)
|
||||
|
||||
if start_instance(module, proxmox, vm, vmid, timeout):
|
||||
module.exit_json(changed=True, msg="VM %s started" % vmid)
|
||||
except Exception, e:
|
||||
module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e ))
|
||||
|
||||
elif state == 'stopped':
|
||||
try:
|
||||
vm = get_instance(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
|
||||
if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted':
|
||||
if module.params['force']:
|
||||
if umount_instance(module, proxmox, vm, vmid, timeout):
|
||||
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
|
||||
else:
|
||||
module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
|
||||
"You can use force option to umount it.") % vmid)
|
||||
|
||||
if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'stopped':
|
||||
module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
|
||||
|
||||
if stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']):
|
||||
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
|
||||
except Exception, e:
|
||||
module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e ))
|
||||
|
||||
elif state == 'restarted':
|
||||
try:
|
||||
vm = get_instance(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
if ( proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'stopped'
|
||||
or proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted' ):
|
||||
module.exit_json(changed=False, msg="VM %s is not running" % vmid)
|
||||
|
||||
if ( stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']) and
|
||||
start_instance(module, proxmox, vm, vmid, timeout) ):
|
||||
module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
|
||||
except Exception, e:
|
||||
module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e ))
|
||||
|
||||
elif state == 'absent':
|
||||
try:
|
||||
vm = get_instance(proxmox, vmid)
|
||||
if not vm:
|
||||
module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
|
||||
|
||||
if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'running':
|
||||
module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
|
||||
|
||||
if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted':
|
||||
module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
|
||||
|
||||
taskid = proxmox.nodes(vm[0]['node']).openvz.delete(vmid)
|
||||
while timeout:
|
||||
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
|
||||
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
|
||||
module.exit_json(changed=True, msg="VM %s removed" % vmid)
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
|
||||
% proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
except Exception, e:
|
||||
module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e ))
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
|
@ -55,8 +55,13 @@ options:
|
|||
- XML document used with the define command
|
||||
required: false
|
||||
default: null
|
||||
requirements: [ "libvirt" ]
|
||||
author: Michael DeHaan, Seth Vidal
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- "libvirt-python"
|
||||
author:
|
||||
- "Ansible Core Team"
|
||||
- '"Michael DeHaan (@mpdehaan)" <michael.dehaan@gmail.com>'
|
||||
- '"Seth Vidal (@skvidal)" <skvidal@fedoraproject.org>'
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
|
|
@ -25,10 +25,11 @@ short_description: Manage VMware vSphere Datacenters
|
|||
description:
|
||||
- Manage VMware vSphere Datacenters
|
||||
version_added: 2.0
|
||||
author: Joseph Callen
|
||||
author: '"Joseph Callen (@jcpowermac)" <jcallen () csc.com>'
|
||||
notes:
|
||||
- Tested on vSphere 5.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
hostname:
|
||||
|
|
|
@ -38,10 +38,11 @@ description:
|
|||
changed occurred. An api method is planned to supply this metadata so at that
|
||||
stage change management will be added.
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- python-consul
|
||||
- requests
|
||||
version_added: "1.9"
|
||||
author: Steve Gargan (steve.gargan@gmail.com)
|
||||
version_added: "2.0"
|
||||
author: '"Steve Gargan (@sgargan)" <steve.gargan@gmail.com>'
|
||||
options:
|
||||
state:
|
||||
description:
|
|
@ -25,11 +25,12 @@ description:
|
|||
rules in a consul cluster via the agent. For more details on using and
|
||||
configuring ACLs, see https://www.consul.io/docs/internals/acl.html.
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- python-consul
|
||||
- pyhcl
|
||||
- requests
|
||||
version_added: "1.9"
|
||||
author: Steve Gargan (steve.gargan@gmail.com)
|
||||
version_added: "2.0"
|
||||
author: '"Steve Gargan (@sgargan)" <steve.gargan@gmail.com>'
|
||||
options:
|
||||
mgmt_token:
|
||||
description:
|
|
@ -28,10 +28,11 @@ description:
|
|||
represents a prefix then Note that when a value is removed, the existing
|
||||
value if any is returned as part of the results.
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- python-consul
|
||||
- requests
|
||||
version_added: "1.9"
|
||||
author: Steve Gargan (steve.gargan@gmail.com)
|
||||
version_added: "2.0"
|
||||
author: '"Steve Gargan (@sgargan)" <steve.gargan@gmail.com>'
|
||||
options:
|
||||
state:
|
||||
description:
|
|
@ -26,10 +26,11 @@ description:
|
|||
to implement distributed locks. In depth documentation for working with
|
||||
sessions can be found here http://www.consul.io/docs/internals/sessions.html
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- python-consul
|
||||
- requests
|
||||
version_added: "1.9"
|
||||
author: Steve Gargan (steve.gargan@gmail.com)
|
||||
version_added: "2.0"
|
||||
author: '"Steve Gargan (@sgargan)" <steve.gargan@gmail.com>'
|
||||
options:
|
||||
state:
|
||||
description:
|
|
@ -91,7 +91,7 @@ notes:
|
|||
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
|
||||
can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
|
||||
requirements: [ "pymongo" ]
|
||||
author: Elliott Foster
|
||||
author: '"Elliott Foster (@elliotttf)" <elliott@fourkitchens.com>'
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
|
|
@ -98,7 +98,7 @@ notes:
|
|||
this needs to be in the redis.conf in the masterauth variable
|
||||
|
||||
requirements: [ redis ]
|
||||
author: Xabier Larrakoetxea
|
||||
author: "Xabier Larrakoetxea (@slok)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
|
|
@ -26,6 +26,9 @@ description:
|
|||
- This module can be used to join nodes to a cluster, check
|
||||
the status of the cluster.
|
||||
version_added: "1.2"
|
||||
author:
|
||||
- '"James Martin (@jsmartin)" <jmartin@ansible.com>'
|
||||
- '"Drew Kerrigan (@drewkerrigan)" <dkerrigan@basho.com>'
|
||||
options:
|
||||
command:
|
||||
description:
|
||||
|
|
|
@ -30,6 +30,7 @@ short_description: Manage MySQL replication
|
|||
description:
|
||||
- Manages MySQL server replication, slave, master status get and change master host.
|
||||
version_added: "1.3"
|
||||
author: '"Balazs Pocze (@banyek)" <banyek@gawker.com>'
|
||||
options:
|
||||
mode:
|
||||
description:
|
||||
|
@ -93,7 +94,7 @@ options:
|
|||
master_ssl:
|
||||
description:
|
||||
- same as mysql variable
|
||||
possible values: 0,1
|
||||
choices: [ 0, 1 ]
|
||||
master_ssl_ca:
|
||||
description:
|
||||
- same as mysql variable
|
||||
|
@ -109,7 +110,12 @@ options:
|
|||
master_ssl_cipher:
|
||||
description:
|
||||
- same as mysql variable
|
||||
|
||||
master_auto_position:
|
||||
description:
|
||||
- whether the host uses GTID-based replication or not
|
||||
required: false
|
||||
default: null
|
||||
version_added: "2.0"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
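# Hypothetical example for the new master_auto_position option: point a slave at a
# GTID-enabled master and let replication auto-position itself instead of using an
# explicit log file and position.
- mysql_replication: mode=changemaster master_host=master.example.com master_user=repl master_password=secret master_auto_position=yes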
@ -242,6 +248,7 @@ def main():
|
|||
login_port=dict(default=3306, type='int'),
|
||||
login_unix_socket=dict(default=None),
|
||||
mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave"]),
|
||||
master_auto_position=dict(default=False, type='bool'),
|
||||
master_host=dict(default=None),
|
||||
master_user=dict(default=None),
|
||||
master_password=dict(default=None),
|
||||
|
@ -279,6 +286,7 @@ def main():
|
|||
master_ssl_cert = module.params["master_ssl_cert"]
|
||||
master_ssl_key = module.params["master_ssl_key"]
|
||||
master_ssl_cipher = module.params["master_ssl_cipher"]
|
||||
master_auto_position = module.params["master_auto_position"]
|
||||
|
||||
if not mysqldb_found:
|
||||
module.fail_json(msg="the python mysqldb module is required")
|
||||
|
@ -376,6 +384,8 @@ def main():
|
|||
if master_ssl_cipher:
|
||||
chm.append("MASTER_SSL_CIPHER=%(master_ssl_cipher)s")
|
||||
chm_params['master_ssl_cipher'] = master_ssl_cipher
|
||||
if master_auto_position:
|
||||
chm.append("MASTER_AUTO_POSITION = 1")
|
||||
changemaster(cursor, chm, chm_params)
|
||||
module.exit_json(changed=True)
|
||||
elif mode in "startslave":
|
||||
|
|
|
@ -65,7 +65,7 @@ notes:
|
|||
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
|
||||
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module.
|
||||
requirements: [ psycopg2 ]
|
||||
author: Daniel Schep
|
||||
author: "Daniel Schep (@dschep)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
|
|
@ -95,7 +95,7 @@ notes:
|
|||
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
|
||||
on the remote host before using this module.
|
||||
requirements: [ psycopg2 ]
|
||||
author: Jens Depuydt
|
||||
author: "Jens Depuydt (@jensdepuydt)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
|
|
@ -67,7 +67,7 @@ notes:
|
|||
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
|
||||
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
|
||||
requirements: [ 'unixODBC', 'pyodbc' ]
|
||||
author: Dariusz Owczarek
|
||||
author: "Dariusz Owczarek (@dareko)"
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
|
|
|
@ -59,7 +59,7 @@ notes:
|
|||
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
|
||||
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
|
||||
requirements: [ 'unixODBC', 'pyodbc' ]
|
||||
author: Dariusz Owczarek
|
||||
author: "Dariusz Owczarek (@dareko)"
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
|
|
|
@ -75,7 +75,7 @@ notes:
|
|||
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
|
||||
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
|
||||
requirements: [ 'unixODBC', 'pyodbc' ]
|
||||
author: Dariusz Owczarek
|
||||
author: "Dariusz Owczarek (@dareko)"
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
|
|
|
@ -91,7 +91,7 @@ notes:
|
|||
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
|
||||
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
|
||||
requirements: [ 'unixODBC', 'pyodbc' ]
|
||||
author: Dariusz Owczarek
|
||||
author: "Dariusz Owczarek (@dareko)"
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
|
|
|
@ -107,7 +107,7 @@ notes:
|
|||
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
|
||||
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
|
||||
requirements: [ 'unixODBC', 'pyodbc' ]
|
||||
author: Dariusz Owczarek
|
||||
author: "Dariusz Owczarek (@dareko)"
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
|
@ -233,7 +233,10 @@ def present(user_facts, cursor, user, profile, resource_pool,
|
|||
changed = False
|
||||
query_fragments = ["alter user {0}".format(user)]
|
||||
if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'):
|
||||
state = 'lock' if locked else 'unlock'
|
||||
if locked:
|
||||
state = 'lock'
|
||||
else:
|
||||
state = 'unlock'
|
||||
query_fragments.append("account {0}".format(state))
|
||||
changed = True
|
||||
if password and password != user_facts[user_key]['password']:
|
||||
|
|
6
files/patch.py
Executable file → Normal file
|
@ -22,7 +22,9 @@
|
|||
DOCUMENTATION = '''
|
||||
---
|
||||
module: patch
|
||||
author: Luis Alberto Perez Lazaro, Jakub Jirutka
|
||||
author:
|
||||
- '"Jakub Jirutka (@jirutka)" <jakub@jirutka.cz>'
|
||||
- '"Luis Alberto Perez Lazaro (@luisperlaz)" <luisperlazaro@gmail.com>'
|
||||
version_added: 1.9
|
||||
description:
|
||||
- Apply patch files using the GNU patch tool.
|
||||
|
@ -110,7 +112,7 @@ def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_ru
|
|||
|
||||
(rc, out, err) = patch_func(opts)
|
||||
if rc != 0:
|
||||
msg = out if not err else err
|
||||
msg = err or out
|
||||
raise PatchError(msg)
|
||||
|
||||
|
||||
|
|
214
messaging/rabbitmq_binding.py
Normal file
|
@ -0,0 +1,214 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rabbitmq_binding
|
||||
author: '"Manuel Sousa (@manuel-sousa)" <manuel.sousa@gmail.com>'
|
||||
version_added: "2.0"
|
||||
|
||||
short_description: This module manages rabbitMQ bindings
|
||||
description:
|
||||
- This module uses the RabbitMQ REST API to create/delete bindings
|
||||
requirements: [ python requests ]
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Whether the binding should be present or absent
|
||||
choices: [ "present", "absent" ]
|
||||
required: false
|
||||
default: present
|
||||
name:
|
||||
description:
|
||||
- source exchange to create binding on
|
||||
required: true
|
||||
aliases: [ "src", "source" ]
|
||||
login_user:
|
||||
description:
|
||||
- rabbitMQ user for connection
|
||||
required: false
|
||||
default: guest
|
||||
login_password:
|
||||
description:
|
||||
- rabbitMQ password for connection
|
||||
required: false
|
||||
default: false
|
||||
login_host:
|
||||
description:
|
||||
- rabbitMQ host for connection
|
||||
required: false
|
||||
default: localhost
|
||||
login_port:
|
||||
description:
|
||||
- rabbitMQ management api port
|
||||
required: false
|
||||
default: 15672
|
||||
vhost:
|
||||
description:
|
||||
- rabbitMQ virtual host
|
||||
- default vhost is /
|
||||
required: false
|
||||
default: "/"
|
||||
destination:
|
||||
description:
|
||||
- destination exchange or queue for the binding
|
||||
required: true
|
||||
aliases: [ "dst", "dest" ]
|
||||
destination_type:
|
||||
description:
|
||||
- Either queue or exchange
|
||||
required: true
|
||||
choices: [ "queue", "exchange" ]
|
||||
aliases: [ "type", "dest_type" ]
|
||||
routing_key:
|
||||
description:
|
||||
- routing key for the binding
|
||||
- default is #
|
||||
required: false
|
||||
default: "#"
|
||||
arguments:
|
||||
description:
|
||||
- extra arguments for exchange. If defined this argument is a key/value dictionary
|
||||
required: false
|
||||
default: {}
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Bind myQueue to directExchange with routing key info
|
||||
- rabbitmq_binding: name=directExchange destination=myQueue type=queue routing_key=info
|
||||
|
||||
# Bind directExchange to topicExchange with routing key *.info
|
||||
- rabbitmq_binding: name=topicExchange destination=topicExchange type=exchange routing_key="*.info"
|
||||
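# Hypothetical example: remove the binding again (state also accepts absent)
- rabbitmq_binding: name=directExchange destination=myQueue type=queue routing_key=info state=absent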
'''
|
||||
|
||||
import requests
|
||||
import urllib
|
||||
import json
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
state = dict(default='present', choices=['present', 'absent'], type='str'),
|
||||
name = dict(required=True, aliases=[ "src", "source" ], type='str'),
|
||||
login_user = dict(default='guest', type='str'),
|
||||
login_password = dict(default='guest', type='str', no_log=True),
|
||||
login_host = dict(default='localhost', type='str'),
|
||||
login_port = dict(default='15672', type='str'),
|
||||
vhost = dict(default='/', type='str'),
|
||||
destination = dict(required=True, aliases=[ "dst", "dest"], type='str'),
|
||||
destination_type = dict(required=True, aliases=[ "type", "dest_type"], choices=[ "queue", "exchange" ],type='str'),
|
||||
routing_key = dict(default='#', type='str'),
|
||||
arguments = dict(default=dict(), type='dict')
|
||||
),
|
||||
supports_check_mode = True
|
||||
)
|
||||
|
||||
if module.params['destination_type'] == "queue":
|
||||
dest_type="q"
|
||||
else:
|
||||
dest_type="e"
|
||||
|
||||
url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s" % (
|
||||
module.params['login_host'],
|
||||
module.params['login_port'],
|
||||
urllib.quote(module.params['vhost'],''),
|
||||
module.params['name'],
|
||||
dest_type,
|
||||
module.params['destination'],
|
||||
urllib.quote(module.params['routing_key'],'')
|
||||
)
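# Note: the management API addresses a binding as
#   /api/bindings/<vhost>/e/<source>/<q|e>/<destination>/<props>
# where <props> is the binding's "properties key". With no extra arguments this is
# simply the routing key, which is what the URL above assumes.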
|
||||
|
||||
# Check if exchange already exists
|
||||
r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))
|
||||
|
||||
if r.status_code==200:
|
||||
binding_exists = True
|
||||
response = r.json()
|
||||
elif r.status_code==404:
|
||||
binding_exists = False
|
||||
response = r.text
|
||||
else:
|
||||
module.fail_json(
|
||||
msg = "Invalid response from RESTAPI when trying to check if exchange exists",
|
||||
details = r.text
|
||||
)
|
||||
|
||||
if module.params['state']=='present':
|
||||
change_required = not binding_exists
|
||||
else:
|
||||
change_required = binding_exists
|
||||
|
||||
# Exit if check_mode
|
||||
if module.check_mode:
|
||||
module.exit_json(
|
||||
changed= change_required,
|
||||
name = module.params['name'],
|
||||
details = response,
|
||||
arguments = module.params['arguments']
|
||||
)
|
||||
|
||||
# Do changes
|
||||
if change_required:
|
||||
if module.params['state'] == 'present':
|
||||
url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s" % (
|
||||
module.params['login_host'],
|
||||
module.params['login_port'],
|
||||
urllib.quote(module.params['vhost'],''),
|
||||
module.params['name'],
|
||||
dest_type,
|
||||
module.params['destination']
|
||||
)
|
||||
|
||||
r = requests.post(
|
||||
url,
|
||||
auth = (module.params['login_user'],module.params['login_password']),
|
||||
headers = { "content-type": "application/json"},
|
||||
data = json.dumps({
|
||||
"routing_key": module.params['routing_key'],
|
||||
"arguments": module.params['arguments']
|
||||
})
|
||||
)
|
||||
elif module.params['state'] == 'absent':
|
||||
r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))
|
||||
|
||||
if r.status_code == 204 or r.status_code == 201:
|
||||
module.exit_json(
|
||||
changed = True,
|
||||
name = module.params['name'],
|
||||
destination = module.params['destination']
|
||||
)
|
||||
else:
|
||||
module.fail_json(
|
||||
msg = "Error creating exchange",
|
||||
status = r.status_code,
|
||||
details = r.text
|
||||
)
|
||||
|
||||
else:
|
||||
module.exit_json(
|
||||
changed = False,
|
||||
name = module.params['name']
|
||||
)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
218
messaging/rabbitmq_exchange.py
Normal file
|
@ -0,0 +1,218 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rabbitmq_exchange
|
||||
author: '"Manuel Sousa (@manuel-sousa)" <manuel.sousa@gmail.com>'
|
||||
version_added: "2.0"
|
||||
|
||||
short_description: This module manages rabbitMQ exchanges
|
||||
description:
|
||||
- This module uses the RabbitMQ REST API to create/delete exchanges
|
||||
requirements: [ python requests ]
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the exchange to create
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Whether the exchange should be present or absent
|
||||
|
||||
choices: [ "present", "absent" ]
|
||||
required: false
|
||||
default: present
|
||||
login_user:
|
||||
description:
|
||||
- rabbitMQ user for connection
|
||||
required: false
|
||||
default: guest
|
||||
login_password:
|
||||
description:
|
||||
- rabbitMQ password for connection
|
||||
required: false
|
||||
default: false
|
||||
login_host:
|
||||
description:
|
||||
- rabbitMQ host for connection
|
||||
required: false
|
||||
default: localhost
|
||||
login_port:
|
||||
description:
|
||||
- rabbitMQ management api port
|
||||
required: false
|
||||
default: 15672
|
||||
vhost:
|
||||
description:
|
||||
- rabbitMQ virtual host
|
||||
required: false
|
||||
default: "/"
|
||||
durable:
|
||||
description:
|
||||
- whether exchange is durable or not
|
||||
required: false
|
||||
choices: [ "yes", "no" ]
|
||||
default: yes
|
||||
exchange_type:
|
||||
description:
|
||||
- type for the exchange
|
||||
required: false
|
||||
choices: [ "fanout", "direct", "headers", "topic" ]
|
||||
aliases: [ "type" ]
|
||||
default: direct
|
||||
auto_delete:
|
||||
description:
|
||||
- if the exchange should delete itself after all queues/exchanges unbound from it
|
||||
required: false
|
||||
choices: [ "yes", "no" ]
|
||||
default: no
|
||||
internal:
|
||||
description:
|
||||
- exchange is available only for other exchanges
|
||||
required: false
|
||||
choices: [ "yes", "no" ]
|
||||
default: no
|
||||
arguments:
|
||||
description:
|
||||
- extra arguments for exchange. If defined this argument is a key/value dictionary
|
||||
required: false
|
||||
default: {}
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create direct exchange
|
||||
- rabbitmq_exchange: name=directExchange
|
||||
|
||||
# Create topic exchange on vhost
|
||||
- rabbitmq_exchange: name=topicExchange type=topic vhost=myVhost
|
||||
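# Hypothetical example: delete an exchange that is no longer needed
- rabbitmq_exchange: name=directExchange state=absent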
'''
|
||||
|
||||
import requests
|
||||
import urllib
|
||||
import json
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
state = dict(default='present', choices=['present', 'absent'], type='str'),
|
||||
name = dict(required=True, type='str'),
|
||||
login_user = dict(default='guest', type='str'),
|
||||
login_password = dict(default='guest', type='str', no_log=True),
|
||||
login_host = dict(default='localhost', type='str'),
|
||||
login_port = dict(default='15672', type='str'),
|
||||
vhost = dict(default='/', type='str'),
|
||||
durable = dict(default=True, choices=BOOLEANS, type='bool'),
|
||||
auto_delete = dict(default=False, choices=BOOLEANS, type='bool'),
|
||||
internal = dict(default=False, choices=BOOLEANS, type='bool'),
|
||||
exchange_type = dict(default='direct', aliases=['type'], type='str'),
|
||||
arguments = dict(default=dict(), type='dict')
|
||||
),
|
||||
supports_check_mode = True
|
||||
)
|
||||
|
||||
url = "http://%s:%s/api/exchanges/%s/%s" % (
|
||||
module.params['login_host'],
|
||||
module.params['login_port'],
|
||||
urllib.quote(module.params['vhost'],''),
|
||||
module.params['name']
|
||||
)
|
||||
|
||||
# Check if exchange already exists
|
||||
r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))
|
||||
|
||||
if r.status_code==200:
|
||||
exchange_exists = True
|
||||
response = r.json()
|
||||
elif r.status_code==404:
|
||||
exchange_exists = False
|
||||
response = r.text
|
||||
else:
|
||||
module.fail_json(
|
||||
msg = "Invalid response from RESTAPI when trying to check if exchange exists",
|
||||
details = r.text
|
||||
)
|
||||
|
||||
if module.params['state']=='present':
|
||||
change_required = not exchange_exists
|
||||
else:
|
||||
change_required = exchange_exists
|
||||
|
||||
# Check if attributes change on existing exchange
|
||||
if not change_required and r.status_code==200 and module.params['state'] == 'present':
|
||||
if not (
|
||||
response['durable'] == module.params['durable'] and
|
||||
response['auto_delete'] == module.params['auto_delete'] and
|
||||
response['internal'] == module.params['internal'] and
|
||||
response['type'] == module.params['exchange_type']
|
||||
):
|
||||
module.fail_json(
|
||||
msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing exchanges"
|
||||
)
|
||||
|
||||
# Exit if check_mode
|
||||
if module.check_mode:
|
||||
module.exit_json(
|
||||
changed= change_required,
|
||||
name = module.params['name'],
|
||||
details = response,
|
||||
arguments = module.params['arguments']
|
||||
)
|
||||
|
||||
# Do changes
|
||||
if change_required:
|
||||
if module.params['state'] == 'present':
|
||||
r = requests.put(
|
||||
url,
|
||||
auth = (module.params['login_user'],module.params['login_password']),
|
||||
headers = { "content-type": "application/json"},
|
||||
data = json.dumps({
|
||||
"durable": module.params['durable'],
|
||||
"auto_delete": module.params['auto_delete'],
|
||||
"internal": module.params['internal'],
|
||||
"type": module.params['exchange_type'],
|
||||
"arguments": module.params['arguments']
|
||||
})
|
||||
)
|
||||
elif module.params['state'] == 'absent':
|
||||
r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))
|
||||
|
||||
if r.status_code == 204:
|
||||
module.exit_json(
|
||||
changed = True,
|
||||
name = module.params['name']
|
||||
)
|
||||
else:
|
||||
module.fail_json(
|
||||
msg = "Error creating exchange",
|
||||
status = r.status_code,
|
||||
details = r.text
|
||||
)
|
||||
|
||||
else:
|
||||
module.exit_json(
|
||||
changed = False,
|
||||
name = module.params['name']
|
||||
)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
|
@ -25,7 +25,7 @@ short_description: Adds or removes parameters to RabbitMQ
|
|||
description:
|
||||
- Manage dynamic, cluster-wide parameters for RabbitMQ
|
||||
version_added: "1.1"
|
||||
author: Chris Hoffman
|
||||
author: '"Chris Hoffman (@chrishoffman)"'
|
||||
options:
|
||||
component:
|
||||
description:
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: Adds or removes plugins to RabbitMQ
|
|||
description:
|
||||
- Enables or disables RabbitMQ plugins
|
||||
version_added: "1.1"
|
||||
author: Chris Hoffman
|
||||
author: '"Chris Hoffman (@chrishoffman)"'
|
||||
options:
|
||||
names:
|
||||
description:
|
||||
|
|
|
@ -26,7 +26,7 @@ short_description: Manage the state of policies in RabbitMQ.
|
|||
description:
|
||||
- Manage the state of a virtual host in RabbitMQ.
|
||||
version_added: "1.5"
|
||||
author: John Dewey
|
||||
author: '"John Dewey (@retr0h)" <john@dewey.ws>'
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
|
263
messaging/rabbitmq_queue.py
Normal file
|
@ -0,0 +1,263 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rabbitmq_queue
|
||||
author: '"Manuel Sousa (@manuel-sousa)" <manuel.sousa@gmail.com>'
|
||||
version_added: "2.0"
|
||||
|
||||
short_description: This module manages rabbitMQ queues
|
||||
description:
|
||||
- This module uses the RabbitMQ REST API to create/delete queues
|
||||
requirements: [ python requests ]
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the queue to create
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Whether the queue should be present or absent
|
||||
|
||||
choices: [ "present", "absent" ]
|
||||
required: false
|
||||
default: present
|
||||
login_user:
|
||||
description:
|
||||
- rabbitMQ user for connection
|
||||
required: false
|
||||
default: guest
|
||||
login_password:
|
||||
description:
|
||||
- rabbitMQ password for connection
|
||||
required: false
|
||||
default: false
|
||||
login_host:
|
||||
description:
|
||||
- rabbitMQ host for connection
|
||||
required: false
|
||||
default: localhost
|
||||
login_port:
|
||||
description:
|
||||
- rabbitMQ management api port
|
||||
required: false
|
||||
default: 15672
|
||||
vhost:
|
||||
description:
|
||||
- rabbitMQ virtual host
|
||||
required: false
|
||||
default: "/"
|
||||
durable:
|
||||
description:
|
||||
- whether queue is durable or not
|
||||
required: false
|
||||
choices: [ "yes", "no" ]
|
||||
default: yes
|
||||
auto_delete:
|
||||
description:
|
||||
- if the queue should delete itself after all consumers have unsubscribed from it
|
||||
required: false
|
||||
choices: [ "yes", "no" ]
|
||||
default: no
|
||||
message_ttl:
|
||||
description:
|
||||
- How long a message can live in queue before it is discarded (milliseconds)
|
||||
required: False
|
||||
default: forever
|
||||
auto_expires:
|
||||
description:
|
||||
- How long a queue can be unused before it is automatically deleted (milliseconds)
|
||||
required: false
|
||||
default: forever
|
||||
max_length:
|
||||
description:
|
||||
- How many messages can the queue contain before it starts rejecting
|
||||
required: false
|
||||
default: no limit
|
||||
dead_letter_exchange:
|
||||
description:
|
||||
- Optional name of an exchange to which messages will be republished if they
|
||||
- are rejected or expire
|
||||
required: false
|
||||
default: None
|
||||
dead_letter_routing_key:
|
||||
description:
|
||||
- Optional replacement routing key to use when a message is dead-lettered.
|
||||
- Original routing key will be used if unset
|
||||
required: false
|
||||
default: None
|
||||
arguments:
|
||||
description:
|
||||
- extra arguments for queue. If defined this argument is a key/value dictionary
|
||||
required: false
|
||||
default: {}
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create a queue
|
||||
- rabbitmq_queue: name=myQueue
|
||||
|
||||
# Create a queue on remote host
|
||||
- rabbitmq_queue: name=myRemoteQueue login_user=user login_password=secret login_host=remote.example.org
|
||||
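# Hypothetical example: create a bounded queue using the documented message_ttl and max_length options
- rabbitmq_queue: name=boundedQueue message_ttl=60000 max_length=100000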
'''
|
||||
|
||||
import requests
|
||||
import urllib
|
||||
import json
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
state = dict(default='present', choices=['present', 'absent'], type='str'),
|
||||
name = dict(required=True, type='str'),
|
||||
login_user = dict(default='guest', type='str'),
|
||||
login_password = dict(default='guest', type='str', no_log=True),
|
||||
login_host = dict(default='localhost', type='str'),
|
||||
login_port = dict(default='15672', type='str'),
|
||||
vhost = dict(default='/', type='str'),
|
||||
durable = dict(default=True, choices=BOOLEANS, type='bool'),
|
||||
auto_delete = dict(default=False, choices=BOOLEANS, type='bool'),
|
||||
message_ttl = dict(default=None, type='int'),
|
||||
auto_expires = dict(default=None, type='int'),
|
||||
max_length = dict(default=None, type='int'),
|
||||
dead_letter_exchange = dict(default=None, type='str'),
|
||||
dead_letter_routing_key = dict(default=None, type='str'),
|
||||
arguments = dict(default=dict(), type='dict')
|
||||
),
|
||||
supports_check_mode = True
|
||||
)
|
||||
|
||||
url = "http://%s:%s/api/queues/%s/%s" % (
|
||||
module.params['login_host'],
|
||||
module.params['login_port'],
|
||||
urllib.quote(module.params['vhost'],''),
|
||||
module.params['name']
|
||||
)
|
||||
|
||||
# Check if queue already exists
|
||||
r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))
|
||||
|
||||
if r.status_code==200:
|
||||
queue_exists = True
|
||||
response = r.json()
|
||||
elif r.status_code==404:
|
||||
queue_exists = False
|
||||
response = r.text
|
||||
else:
|
||||
module.fail_json(
|
||||
msg = "Invalid response from RESTAPI when trying to check if queue exists",
|
||||
details = r.text
|
||||
)
|
||||
|
||||
if module.params['state']=='present':
|
||||
change_required = not queue_exists
|
||||
else:
|
||||
change_required = queue_exists
|
||||
|
||||
# Check if attributes change on existing queue
|
||||
if not change_required and r.status_code==200 and module.params['state'] == 'present':
|
||||
if not (
|
||||
response['durable'] == module.params['durable'] and
|
||||
response['auto_delete'] == module.params['auto_delete'] and
|
||||
(
|
||||
( 'x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl'] ) or
|
||||
( 'x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None )
|
||||
) and
|
||||
(
|
||||
( 'x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires'] ) or
|
||||
( 'x-expires' not in response['arguments'] and module.params['auto_expires'] is None )
|
||||
) and
|
||||
(
|
||||
( 'x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length'] ) or
|
||||
( 'x-max-length' not in response['arguments'] and module.params['max_length'] is None )
|
||||
) and
|
||||
(
|
||||
( 'x-dead-letter-exchange' in response['arguments'] and response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange'] ) or
|
||||
( 'x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None )
|
||||
) and
|
||||
(
|
||||
( 'x-dead-letter-routing-key' in response['arguments'] and response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key'] ) or
|
||||
( 'x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None )
|
||||
)
|
||||
):
|
||||
module.fail_json(
|
||||
msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing queues",
|
||||
)
|
||||
|
||||
|
||||
# Copy parameters to arguments as used by RabbitMQ
|
||||
for k,v in {
|
||||
'message_ttl': 'x-message-ttl',
|
||||
'auto_expires': 'x-expires',
|
||||
'max_length': 'x-max-length',
|
||||
'dead_letter_exchange': 'x-dead-letter-exchange',
|
||||
'dead_letter_routing_key': 'x-dead-letter-routing-key'
|
||||
}.items():
|
||||
if module.params[k]:
|
||||
module.params['arguments'][v] = module.params[k]
|
||||
|
||||
# Exit if check_mode
|
||||
if module.check_mode:
|
||||
module.exit_json(
|
||||
changed= change_required,
|
||||
name = module.params['name'],
|
||||
details = response,
|
||||
arguments = module.params['arguments']
|
||||
)
|
||||
|
||||
# Do changes
|
||||
if change_required:
|
||||
if module.params['state'] == 'present':
|
||||
r = requests.put(
|
||||
url,
|
||||
auth = (module.params['login_user'],module.params['login_password']),
|
||||
headers = { "content-type": "application/json"},
|
||||
data = json.dumps({
|
||||
"durable": module.params['durable'],
|
||||
"auto_delete": module.params['auto_delete'],
|
||||
"arguments": module.params['arguments']
|
||||
})
|
||||
)
|
||||
elif module.params['state'] == 'absent':
|
||||
r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))
|
||||
|
||||
if r.status_code == 204:
|
||||
module.exit_json(
|
||||
changed = True,
|
||||
name = module.params['name']
|
||||
)
|
||||
else:
|
||||
module.fail_json(
|
||||
msg = "Error creating queue",
|
||||
status = r.status_code,
|
||||
details = r.text
|
||||
)
|
||||
|
||||
else:
|
||||
module.exit_json(
|
||||
changed = False,
|
||||
name = module.params['name']
|
||||
)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
|
@ -25,7 +25,7 @@ short_description: Adds or removes users to RabbitMQ
|
|||
description:
|
||||
- Add or remove users to RabbitMQ and assign permissions
|
||||
version_added: "1.1"
|
||||
author: Chris Hoffman
|
||||
author: '"Chris Hoffman (@chrishoffman)"'
|
||||
options:
|
||||
user:
|
||||
description:
|
||||
|
|
|
@ -26,7 +26,7 @@ short_description: Manage the state of a virtual host in RabbitMQ
|
|||
description:
|
||||
- Manage the state of a virtual host in RabbitMQ
|
||||
version_added: "1.1"
|
||||
author: Chris Hoffman
|
||||
author: '"Chris Hoffman (@choffman)"'
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
|
|
@ -22,7 +22,7 @@ DOCUMENTATION = '''
|
|||
---
|
||||
module: airbrake_deployment
|
||||
version_added: "1.2"
|
||||
author: Bruce Pennypacker
|
||||
author: '"Bruce Pennypacker (@bpennypacker)" <bruce@pennypacker.org>'
|
||||
short_description: Notify airbrake about app deployments
|
||||
description:
|
||||
- Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking)
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
DOCUMENTATION = '''
|
||||
---
|
||||
module: bigpanda
|
||||
author: BigPanda
|
||||
author: "Hagai Kariti (@hkariti)"
|
||||
short_description: Notify BigPanda about deployments
|
||||
version_added: "1.8"
|
||||
description:
|
||||
|
@ -162,7 +162,7 @@ def main():
|
|||
module.exit_json(changed=True, **deployment)
|
||||
else:
|
||||
module.fail_json(msg=json.dumps(info))
|
||||
except Exception as e:
|
||||
except Exception, e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
# import module snippets
|
||||
|
|
|
@ -34,7 +34,7 @@ short_description: Manage boundary meters
|
|||
description:
|
||||
- This module manages boundary meters
|
||||
version_added: "1.3"
|
||||
author: curtis@serverascode.com
|
||||
author: '"curtis (@ccollicutt)" <curtis@serverascode.com>'
|
||||
requirements:
|
||||
- Boundary API access
|
||||
- bprobe is required to send data, but not to register a meter
|
||||
|
@ -213,7 +213,7 @@ def download_request(module, name, apiid, apikey, cert_type):
|
|||
cert_file = open(cert_file_path, 'w')
|
||||
cert_file.write(body)
|
||||
cert_file.close
|
||||
os.chmod(cert_file_path, 0o600)
|
||||
os.chmod(cert_file_path, 0600)
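# (0600 is the pre-Python-2.6 spelling of the octal literal 0o600: owner read/write only.)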
|
||||
except:
|
||||
module.fail_json("Could not write to certificate file")
|
||||
|
||||
|
|
132
monitoring/circonus_annotation.py
Normal file
|
@ -0,0 +1,132 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# (c) 2014-2015, Epic Games, Inc.
|
||||
|
||||
import requests
|
||||
import time
|
||||
import json
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: circonus_annotation
|
||||
short_description: create an annotation in circonus
|
||||
description:
|
||||
- Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided
|
||||
author: "Nick Harring (@NickatEpic)"
|
||||
version_added: 2.0
|
||||
requirements:
|
||||
- urllib3
|
||||
- requests
|
||||
- time
|
||||
options:
|
||||
api_key:
|
||||
description:
|
||||
- Circonus API key
|
||||
required: true
|
||||
category:
|
||||
description:
|
||||
- Annotation Category
|
||||
required: true
|
||||
description:
|
||||
description:
|
||||
- Description of annotation
|
||||
required: true
|
||||
title:
|
||||
description:
|
||||
- Title of annotation
|
||||
required: true
|
||||
start:
|
||||
description:
|
||||
- Unix timestamp of event start, defaults to now
|
||||
required: false
|
||||
stop:
|
||||
description:
|
||||
- Unix timestamp of event end, defaults to now + duration
|
||||
required: false
|
||||
duration:
|
||||
description:
|
||||
- Duration in seconds of annotation, defaults to 0
|
||||
required: false
|
||||
'''
|
||||
EXAMPLES = '''
|
||||
# Create a simple annotation event with a source, defaults to start and end time of now
|
||||
- circonus_annotation:
|
||||
api_key: XXXXXXXXXXXXXXXXX
|
||||
title: 'App Config Change'
|
||||
description: 'This is a detailed description of the config change'
|
||||
category: 'This category groups like annotations'
|
||||
# Create an annotation with a duration of 5 minutes and a default start time of now
|
||||
- circonus_annotation:
|
||||
api_key: XXXXXXXXXXXXXXXXX
|
||||
title: 'App Config Change'
|
||||
description: 'This is a detailed description of the config change'
|
||||
category: 'This category groups like annotations'
|
||||
duration: 300
|
||||
# Create an annotation with a start_time and end_time
|
||||
- circonus_annotation:
|
||||
api_key: XXXXXXXXXXXXXXXXX
|
||||
title: 'App Config Change'
|
||||
description: 'This is a detailed description of the config change'
|
||||
category: 'This category groups like annotations'
|
||||
start_time: 1395940006
|
||||
end_time: 1395954407
|
||||
'''
|
||||
def post_annotation(annotation, api_key):
|
||||
''' Takes annotation dict and api_key string'''
|
||||
base_url = 'https://api.circonus.com/v2'
|
||||
anootate_post_endpoint = '/annotation'
|
||||
resp = requests.post(base_url + anootate_post_endpoint,
|
||||
headers=build_headers(api_key), data=json.dumps(annotation))
|
||||
resp.raise_for_status()
|
||||
return resp
|
||||
|
||||
def create_annotation(module):
|
||||
''' Takes ansible module object '''
|
||||
annotation = {}
|
||||
if module.params['duration'] != None:
|
||||
duration = module.params['duration']
|
||||
else:
|
||||
duration = 0
|
||||
if module.params['start'] != None:
|
||||
start = module.params['start']
|
||||
else:
|
||||
start = int(time.time())
|
||||
if module.params['stop'] != None:
|
||||
stop = module.params['stop']
|
||||
else:
|
||||
stop = int(time.time())+ duration
|
||||
annotation['start'] = int(start)
|
||||
annotation['stop'] = int(stop)
|
||||
annotation['category'] = module.params['category']
|
||||
annotation['description'] = module.params['description']
|
||||
annotation['title'] = module.params['title']
|
||||
return annotation
|
||||
def build_headers(api_token):
|
||||
'''Takes api token, returns headers with it included.'''
|
||||
headers = {'X-Circonus-App-Name': 'ansible',
|
||||
'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token,
|
||||
'Accept': 'application/json'}
|
||||
return headers
|
||||
|
||||
def main():
|
||||
'''Main function, dispatches logic'''
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
start=dict(required=False, type='int'),
|
||||
stop=dict(required=False, type='int'),
|
||||
category=dict(required=True),
|
||||
title=dict(required=True),
|
||||
description=dict(required=True),
|
||||
duration=dict(required=False, type='int'),
|
||||
api_key=dict(required=True)
|
||||
)
|
||||
)
|
||||
annotation = create_annotation(module)
|
||||
try:
|
||||
resp = post_annotation(annotation, module.params['api_key'])
|
||||
except requests.exceptions.RequestException, err_str:
|
||||
module.fail_json(msg='Request Failed', reason=err_str)
|
||||
module.exit_json(changed=True, annotation=resp.json())
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
|
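The start/stop defaulting in create_annotation above is easy to misread, so here is a minimal sketch of the same rule on its own, outside the module: with only a duration supplied the window runs from now until now + duration, and with nothing supplied both ends collapse to now. The function name and printed values are hypothetical.

# Illustrative sketch of the defaulting rule used by create_annotation above.
import time

def annotation_window(start=None, stop=None, duration=None):
    duration = duration if duration is not None else 0
    start = start if start is not None else int(time.time())
    stop = stop if stop is not None else int(time.time()) + duration
    return int(start), int(stop)

print(annotation_window(duration=300))   # e.g. (now, now + 300) as Unix timestamps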
@ -14,7 +14,7 @@ description:
- "Allows to post events to DataDog (www.datadoghq.com) service."
- "Uses http://docs.datadoghq.com/api/#events API."
version_added: "1.3"
author: Artūras 'arturaz' Šlajus <x11@arturaz.net>
author: '"Artūras `arturaz` Šlajus (@arturaz)" <x11@arturaz.net>'
notes: []
requirements: [urllib2]
options:

@ -29,7 +29,7 @@ short_description: create an annotation in librato
description:
- Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically
version_added: "1.6"
author: Seth Edwards
author: "Seth Edwards (@sedward)"
requirements:
- urllib2
- base64

@ -138,11 +138,11 @@ def post_annotation(module):

headers = {}
headers['Content-Type'] = 'application/json'
headers['Authorization'] = b"Basic " + base64.b64encode(user + b":" + api_key).strip()
headers['Authorization'] = "Basic " + base64.b64encode(user + ":" + api_key).strip()
req = urllib2.Request(url, json_body, headers)
try:
response = urllib2.urlopen(req)
except urllib2.HTTPError as e:
except urllib2.HTTPError, e:
module.fail_json(msg="Request Failed", reason=e.reason)
response = response.read()
module.exit_json(changed=True, annotation=response)

@ -19,8 +19,8 @@
DOCUMENTATION = '''
---
module: logentries
author: Ivan Vanderbyl
short_description: Module for tracking logs via logentries.com
author: '"Ivan Vanderbyl (@ivanvanderbyl)" <ivan@app.io>'
short_description: Module for tracking logs via logentries.com
description:
- Sends logs to LogEntries in realtime
version_added: "1.6"

@ -39,7 +39,7 @@ options:
default: null
choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
requirements: [ ]
author: Darryl Stoflet
author: '"Darryl Stoflet (@dstoflet)" <stoflet@gmail.com>'
'''

EXAMPLES = '''

@ -9,7 +9,7 @@
# Tim Bielawa <tbielawa@redhat.com>
#
# This software may be freely redistributed under the terms of the GNU
# general public license version 2.
# general public license version 2 or any later version.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

@ -73,7 +73,7 @@ options:
required: true
default: null

author: Tim Bielawa
author: '"Tim Bielawa (@tbielawa)" <tbielawa@redhat.com>'
requirements: [ "Nagios" ]
'''

@ -189,7 +189,7 @@ def main():
services = module.params['services']
cmdfile = module.params['cmdfile']
command = module.params['command']

##################################################################
# Required args per action:
# downtime = (minutes, service, host)

@ -356,7 +356,7 @@ class Nagios(object):
notif_str = "[%s] %s" % (entry_time, cmd)
if host is not None:
notif_str += ";%s" % host

if svc is not None:
notif_str += ";%s" % svc

@ -796,42 +796,42 @@ class Nagios(object):
return return_str_list
else:
return "Fail: could not write to the command file"

def silence_nagios(self):
"""
This command is used to disable notifications for all hosts and services
in nagios.

This is a 'SHUT UP, NAGIOS' command
"""
cmd = 'DISABLE_NOTIFICATIONS'
self._write_command(self._fmt_notif_str(cmd))

def unsilence_nagios(self):
"""
This command is used to enable notifications for all hosts and services
in nagios.

This is a 'OK, NAGIOS, GO'' command
"""
cmd = 'ENABLE_NOTIFICATIONS'
self._write_command(self._fmt_notif_str(cmd))

def nagios_cmd(self, cmd):
"""
This sends an arbitrary command to nagios

It prepends the submitted time and appends a \n

You just have to provide the properly formatted command
"""

pre = '[%s]' % int(time.time())

post = '\n'
cmdstr = '%s %s %s' % (pre, cmd, post)
self._write_command(cmdstr)

def act(self):
"""
Figure out what you want to do from ansible, and then do the

@ -871,13 +871,13 @@ class Nagios(object):
services=self.services)
elif self.action == 'silence_nagios':
self.silence_nagios()

elif self.action == 'unsilence_nagios':
self.unsilence_nagios()

elif self.action == 'command':
self.nagios_cmd(self.command)

# wtf?
else:
self.module.fail_json(msg="unknown action specified: '%s'" % \

@ -22,14 +22,14 @@ DOCUMENTATION = '''
---
module: newrelic_deployment
version_added: "1.2"
author: Matt Coddington
author: '"Matt Coddington (@mcodd)" <coddington@gmail.com>'
short_description: Notify newrelic about app deployments
description:
- Notify newrelic about app deployments (see http://newrelic.github.io/newrelic_api/NewRelicApi/Deployment.html)
- Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api)
options:
token:
description:
- API token.
- API token, to place in the x-api-key header.
required: true
app_name:
description:

@ -102,6 +102,7 @@ def main():
environment=dict(required=False),
validate_certs = dict(default='yes', type='bool'),
),
required_one_of=[['app_name', 'application_id']],
supports_check_mode=True
)

@ -7,7 +7,10 @@ short_description: Create PagerDuty maintenance windows
description:
- This module will let you create PagerDuty maintenance windows
version_added: "1.2"
author: Justin Johns
author:
- "Andrew Newdigate (@suprememoocow)"
- "Dylan Silva (@thaumos)"
- "Justin Johns"
requirements:
- PagerDuty API access
options:

@ -7,7 +7,9 @@ short_description: Pause/unpause Pingdom alerts
description:
- This module will let you pause/unpause Pingdom alerts
version_added: "1.2"
author: Justin Johns
author:
- "Dylan Silva (@thaumos)"
- "Justin Johns"
requirements:
- "This pingdom python library: https://github.com/mbabineau/pingdom-python"
options:

@ -22,7 +22,7 @@ DOCUMENTATION = '''
---
module: rollbar_deployment
version_added: 1.6
author: Max Riveiro
author: '"Max Riveiro (@kavu)" <kavu13@gmail.com>'
short_description: Notify Rollbar about app deployments
description:
- Notify Rollbar about app deployments

@ -8,7 +8,7 @@ short_description: Send code deploy and annotation events to stackdriver
description:
- Send code deploy and annotation events to Stackdriver
version_added: "1.6"
author: Ben Whaley
author: "Ben Whaley (@bwhaley)"
options:
key:
description:

@ -6,7 +6,7 @@ module: uptimerobot
short_description: Pause and start Uptime Robot monitoring
description:
- This module will let you start and pause Uptime Robot Monitoring
author: Nate Kingsley
author: "Nate Kingsley (@nate-kingsley)"
version_added: "1.9"
requirements:
- Valid Uptime Robot API Key
@ -1,7 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2014, René Moser <mail@renemoser.net>
#
# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#

@ -22,191 +22,188 @@
DOCUMENTATION = '''
---
module: zabbix_group
short_description: Add or remove a host group to Zabbix.
short_description: Zabbix host groups creates/deletes
description:
- This module uses the Zabbix API to add and remove host groups.
version_added: '1.8'
requirements: [ 'zabbix-api' ]
- Create host groups if they do not exist.
- Delete existing host groups if they exist.
version_added: "1.8"
author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
state:
description:
- Whether the host group should be added or removed.
required: false
default: present
choices: [ 'present', 'absent' ]
host_group:
description:
- Name of the host group to be added or removed.
required: true
default: null
aliases: [ ]
server_url:
description:
- Url of Zabbix server, with protocol (http or https) e.g.
https://monitoring.example.com/zabbix. C(url) is an alias
for C(server_url). If not set environment variable
C(ZABBIX_SERVER_URL) is used.
- Url of Zabbix server, with protocol (http or https).
C(url) is an alias for C(server_url).
required: true
default: null
aliases: [ 'url' ]
aliases: [ "url" ]
login_user:
description:
- Zabbix user name. If not set environment variable
C(ZABBIX_LOGIN_USER) is used.
- Zabbix user name.
required: true
default: null
login_password:
description:
- Zabbix user password. If not set environment variable
C(ZABBIX_LOGIN_PASSWORD) is used.
- Zabbix user password.
required: true
state:
description:
- Create or delete host group.
required: false
default: "present"
choices: [ "present", "absent" ]
timeout:
description:
- The timeout of API request(seconds).
default: 10
host_groups:
description:
- List of host groups to create or delete.
required: true
aliases: [ "host_group" ]
notes:
- The module has been tested with Zabbix Server 2.2.
author: René Moser
- Too many concurrent updates to the same group may cause Zabbix to return errors, see examples for a workaround if needed.
'''

EXAMPLES = '''
---
# Add a new host group to Zabbix
- zabbix_group: host_group='Linux servers'
server_url=https://monitoring.example.com/zabbix
login_user=ansible
login_password=secure
# Base create host groups example
- name: Create host groups
local_action:
module: zabbix_group
server_url: http://monitor.example.com
login_user: username
login_password: password
state: present
host_groups:
- Example group1
- Example group2

# Add a new host group, login data is provided by environment variables:
# ZABBIX_LOGIN_USER, ZABBIX_LOGIN_PASSWORD, ZABBIX_SERVER_URL:
- zabbix_group: host_group=Webservers

# Remove a host group from Zabbix
- zabbix_group: host_group='Linux servers'
state=absent
server_url=https://monitoring.example.com/zabbix
login_user=ansible
login_password=secure
# Limit the Zabbix group creations to one host since Zabbix can return an error when doing concurent updates
- name: Create host groups
local_action:
module: zabbix_group
server_url: http://monitor.example.com
login_user: username
login_password: password
state: present
host_groups:
- Example group1
- Example group2
when: inventory_hostname==groups['group_name'][0]
'''

try:
from zabbix_api import ZabbixAPI
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
from zabbix_api import Already_Exists

HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False


def create_group(zbx, host_group):
try:
result = zbx.hostgroup.create(
{
'name': host_group
}
)
except BaseException as e:
return 1, None, str(e)
return 0, result['groupids'], None
class HostGroup(object):
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx

# create host group(s) if not exists
def create_host_group(self, group_names):
try:
group_add_list = []
for group_name in group_names:
result = self._zapi.hostgroup.exists({'name': group_name})
if not result:
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.hostgroup.create({'name': group_name})
group_add_list.append(group_name)
except Already_Exists:
return group_add_list
return group_add_list
except Exception, e:
self._module.fail_json(msg="Failed to create host group(s): %s" % e)

def get_group(zbx, host_group):
try:
result = zbx.hostgroup.get(
{
'filter':
{
'name': host_group,
}
}
)
except BaseException as e:
return 1, None, str(e)
# delete host group(s)
def delete_host_group(self, group_ids):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.hostgroup.delete(group_ids)
except Exception, e:
self._module.fail_json(msg="Failed to delete host group(s), Exception: %s" % e)

return 0, result[0]['groupid'], None
# get group ids by name
def get_group_ids(self, host_groups):
group_ids = []


def delete_group(zbx, group_id):
try:
zbx.hostgroup.delete([ group_id ])
except BaseException as e:
return 1, None, str(e)
return 0, None, None


def check_group(zbx, host_group):
try:
result = zbx.hostgroup.exists(
{
'name': host_group
}
)
except BaseException as e:
return 1, None, str(e)
return 0, result, None
group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': host_groups}})
for group in group_list:
group_id = group['groupid']
group_ids.append(group_id)
return group_ids, group_list


def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent']),
host_group=dict(required=True, default=None),
server_url=dict(default=None, aliases=['url']),
login_user=dict(default=None),
login_password=dict(default=None),
server_url=dict(required=True, aliases=['url']),
login_user=dict(required=True),
login_password=dict(required=True, no_log=True),
host_groups=dict(required=True, aliases=['host_group']),
state=dict(default="present", choices=['present','absent']),
timeout=dict(type='int', default=10)
),
supports_check_mode=True,
supports_check_mode=True
)

if not HAS_ZABBIX_API:
module.fail_json(msg='Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)')
module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")

try:
login_user = module.params['login_user'] or os.environ['ZABBIX_LOGIN_USER']
login_password = module.params['login_password'] or os.environ['ZABBIX_LOGIN_PASSWORD']
server_url = module.params['server_url'] or os.environ['ZABBIX_SERVER_URL']
except KeyError, e:
module.fail_json(msg='Missing login data: %s is not set.' % e.message)

host_group = module.params['host_group']
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
host_groups = module.params['host_groups']
state = module.params['state']
timeout = module.params['timeout']

zbx = None

# login to zabbix
try:
zbx = ZabbixAPI(server_url)
zbx = ZabbixAPI(server_url, timeout=timeout)
zbx.login(login_user, login_password)
except BaseException as e:
module.fail_json(msg='Failed to connect to Zabbix server: %s' % e)
except Exception, e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

changed = False
msg = ''
hostGroup = HostGroup(module, zbx)

if state == 'present':
(rc, exists, error) = check_group(zbx, host_group)
if rc != 0:
module.fail_json(msg='Failed to check host group %s existance: %s' % (host_group, error))
if not exists:
if module.check_mode:
changed = True
else:
(rc, group, error) = create_group(zbx, host_group)
if rc == 0:
changed = True
else:
module.fail_json(msg='Failed to get host group: %s' % error)
group_ids = []
group_list = []
if host_groups:
group_ids, group_list = hostGroup.get_group_ids(host_groups)

if state == 'absent':
(rc, exists, error) = check_group(zbx, host_group)
if rc != 0:
module.fail_json(msg='Failed to check host group %s existance: %s' % (host_group, error))
if exists:
if module.check_mode:
changed = True
else:
(rc, group_id, error) = get_group(zbx, host_group)
if rc != 0:
module.fail_json(msg='Failed to get host group: %s' % error)

(rc, _, error) = delete_group(zbx, group_id)
if rc == 0:
changed = True
else:
module.fail_json(msg='Failed to remove host group: %s' % error)

module.exit_json(changed=changed)
if state == "absent":
# delete host groups
if group_ids:
delete_group_names = []
hostGroup.delete_host_group(group_ids)
for group in group_list:
delete_group_names.append(group['name'])
module.exit_json(changed=True,
result="Successfully deleted host group(s): %s." % ",".join(delete_group_names))
else:
module.exit_json(changed=False, result="No host group(s) to delete.")
else:
# create host groups
group_add_list = hostGroup.create_host_group(host_groups)
if len(group_add_list) > 0:
module.exit_json(changed=True, result="Successfully created host group(s): %s" % group_add_list)
else:
module.exit_json(changed=False)

from ansible.module_utils.basic import *
main()
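As a quick aside, the create path in the rewritten module reduces to two calls against the zabbix-api client. The standalone sketch below only mirrors the hostgroup.exists/hostgroup.create calls already used above; the server URL, credentials and group name are placeholders.

# Illustrative sketch only -- mirrors the zabbix-api calls used by the module above.
from zabbix_api import ZabbixAPI

zbx = ZabbixAPI("http://monitor.example.com", timeout=10)   # placeholder server
zbx.login("username", "password")                           # placeholder credentials
if not zbx.hostgroup.exists({'name': 'Example group1'}):
    zbx.hostgroup.create({'name': 'Example group1'})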
@ -26,9 +26,13 @@ short_description: Zabbix host creates/updates/deletes
description:
- This module allows you to create, modify and delete Zabbix host entries and associated group and template data.
version_added: "2.0"
author: Tony Minfei Ding, Harrison Gu
author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- zabbix-api python module
- "python >= 2.6"
- zabbix-api
options:
server_url:
description:

@ -59,24 +63,28 @@ options:
default: None
status:
description:
- 'Monitoring status of the host. Possible values are: "enabled" and "disabled".'
- Monitoring status of the host.
required: false
choices: ['enabled', 'disabled']
default: "enabled"
state:
description:
- 'Possible values are: "present" and "absent". If the host already exists, and the state is "present", it will just to update the host is the associated data is different. "absent" will remove a host if it exists.'
- State of the host.
- On C(present), it will create if host does not exist or update the host if the associated data is different.
- On C(absent) will remove a host if it exists.
required: false
choices: ['present', 'absent']
default: "present"
timeout:
description:
- The timeout of API request(seconds).
- The timeout of API request (seconds).
default: 10
interfaces:
description:
- List of interfaces to be created for the host (see example below).
- 'Available values are: dns, ip, main, port, type and useip.'
- Please review the interface documentation for more information on the supported properties
- https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface
- 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface'
required: false
default: []
'''

@ -114,7 +122,6 @@ EXAMPLES = '''

import logging
import copy
from ansible.module_utils.basic import *

try:
from zabbix_api import ZabbixAPI, ZabbixAPISubClass

@ -335,13 +342,13 @@ def main():
argument_spec=dict(
server_url=dict(required=True, aliases=['url']),
login_user=dict(required=True),
login_password=dict(required=True),
login_password=dict(required=True, no_log=True),
host_name=dict(required=True),
host_groups=dict(required=False),
link_templates=dict(required=False),
status=dict(default="enabled"),
state=dict(default="present"),
timeout=dict(default=10),
status=dict(default="enabled", choices=['enabled', 'disabled']),
state=dict(default="present", choices=['present', 'absent']),
timeout=dict(type='int', default=10),
interfaces=dict(required=False)
),
supports_check_mode=True

@ -26,9 +26,12 @@ short_description: Zabbix host macro creates/updates/deletes
description:
- manages Zabbix host macros, it can create, update or delete them.
version_added: "2.0"
author: Dean Hailin Song
author:
- "(@cave)"
- Dean Hailin Song
requirements:
- zabbix-api python module
- "python >= 2.6"
- zabbix-api
options:
server_url:
description:

@ -57,12 +60,15 @@ options:
required: true
state:
description:
- 'Possible values are: "present" and "absent". If the macro already exists, and the state is "present", it will just to update the macro if needed.'
- State of the macro.
- On C(present), it will create if macro does not exist or update the macro if the associated data is different.
- On C(absent) will remove a macro if it exists.
required: false
choices: ['present', 'absent']
default: "present"
timeout:
description:
- The timeout of API request(seconds).
- The timeout of API request (seconds).
default: 10
'''

@ -81,7 +87,6 @@ EXAMPLES = '''

import logging
import copy
from ansible.module_utils.basic import *

try:
from zabbix_api import ZabbixAPI, ZabbixAPISubClass

@ -168,12 +173,12 @@ def main():
argument_spec=dict(
server_url=dict(required=True, aliases=['url']),
login_user=dict(required=True),
login_password=dict(required=True),
login_password=dict(required=True, no_log=True),
host_name=dict(required=True),
macro_name=dict(required=True),
macro_value=dict(required=True),
state=dict(default="present"),
timeout=dict(default=10)
state=dict(default="present", choices=['present', 'absent']),
timeout=dict(type='int', default=10)
),
supports_check_mode=True
)

@ -26,9 +26,10 @@ short_description: Create Zabbix maintenance windows
description:
- This module will let you create Zabbix maintenance windows.
version_added: "1.8"
author: Alexander Bulimov
author: '"Alexander Bulimov (@abulimov)" <lazywolf0@gmail.com>'
requirements:
- zabbix-api python module
- "python >= 2.6"
- zabbix-api
options:
state:
description:

@ -47,12 +48,10 @@ options:
description:
- Zabbix user name.
required: true
default: null
login_password:
description:
- Zabbix user password.
required: true
default: null
host_names:
description:
- Hosts to manage maintenance window for.

@ -82,7 +81,6 @@ options:
description:
- Unique name of maintenance window.
required: true
default: null
desc:
description:
- Short description of maintenance window.

@ -272,9 +270,9 @@ def main():
host_names=dict(type='list', required=False, default=None, aliases=['host_name']),
minutes=dict(type='int', required=False, default=10),
host_groups=dict(type='list', required=False, default=None, aliases=['host_group']),
login_user=dict(required=True, default=None),
login_password=dict(required=True, default=None),
name=dict(required=True, default=None),
login_user=dict(required=True),
login_password=dict(required=True, no_log=True),
name=dict(required=True),
desc=dict(required=False, default="Created by Ansible"),
collect_data=dict(type='bool', required=False, default=True),
),

@ -27,9 +27,13 @@ short_description: Zabbix screen creates/updates/deletes
description:
- This module allows you to create, modify and delete Zabbix screens and associated graph data.
version_added: "2.0"
author: Tony Minfei Ding, Harrison Gu
author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- zabbix-api python module
- "python >= 2.6"
- zabbix-api
options:
server_url:
description:

@ -46,15 +50,15 @@ options:
required: true
timeout:
description:
- The timeout of API request(seconds).
- The timeout of API request (seconds).
default: 10
zabbix_screens:
description:
- List of screens to be created/updated/deleted(see example).
- If the screen(s) already been added, the screen(s) name won't be updated.
- When creating or updating screen(s), the screen_name, host_group are required.
- When deleting screen(s), the screen_name is required.
- 'The available states are: present(default) and absent. If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated as needed.'
- When creating or updating screen(s), C(screen_name), C(host_group) are required.
- When deleting screen(s), the C(screen_name) is required.
- 'The available states are: C(present) (default) and C(absent). If the screen(s) already exists, and the state is not C(absent), the screen(s) will just be updated as needed.'
required: true
notes:
- Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed.

@ -123,8 +127,6 @@ EXAMPLES = '''
when: inventory_hostname==groups['group_name'][0]
'''

from ansible.module_utils.basic import *

try:
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
from zabbix_api import ZabbixAPIException

@ -315,9 +317,9 @@ def main():
argument_spec=dict(
server_url=dict(required=True, aliases=['url']),
login_user=dict(required=True),
login_password=dict(required=True),
timeout=dict(default=10),
screens=dict(required=True)
login_password=dict(required=True, no_log=True),
timeout=dict(type='int', default=10),
screens=dict(type='dict', required=True)
),
supports_check_mode=True
)

@ -411,5 +413,7 @@ def main():
else:
module.exit_json(changed=False)

# <<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
from ansible.module_utils.basic import *

if __name__ == '__main__':
main()
@ -28,7 +28,7 @@ version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
description:
- Manage slb server objects on A10 Networks devices via aXAPI
author: Mischa Peters
author: '"Mischa Peters (@mischapeters)" <mpeters@a10networks.com>'
notes:
- Requires A10 Networks aXAPI 2.1
options:

@ -28,7 +28,7 @@ version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
description:
- Manage slb service-group objects on A10 Networks devices via aXAPI
author: Mischa Peters
author: '"Mischa Peters (@mischapeters)" <mpeters@a10networks.com>'
notes:
- Requires A10 Networks aXAPI 2.1
- When a server doesn't exist and is added to the service-group the server will be created

@ -28,7 +28,7 @@ version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
description:
- Manage slb virtual server objects on A10 Networks devices via aXAPI
author: Mischa Peters
author: '"Mischa Peters (@mischapeters)" <mpeters@a10networks.com>'
notes:
- Requires A10 Networks aXAPI 2.1
requirements:

@ -82,7 +82,7 @@ options:
choices: ['yes', 'no']

requirements: [ "urllib", "urllib2" ]
author: Nandor Sivok
author: '"Nandor Sivok (@dominis)" <nandor@gawker.com>'
'''

EXAMPLES = '''

2 network/dnsimple.py Executable file → Normal file

@ -93,7 +93,7 @@ options:
default: null

requirements: [ dnsimple ]
author: Alex Coomans
author: "Alex Coomans (@drcapulet)"
'''

EXAMPLES = '''

@ -87,7 +87,7 @@ notes:
- This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks.

requirements: [ urllib, urllib2, hashlib, hmac ]
author: Brice Burgess
author: "Brice Burgess (@briceburg)"
'''

EXAMPLES = '''

8 network/f5/bigip_facts.py Executable file → Normal file
@ -25,7 +25,7 @@ short_description: "Collect facts from F5 BIG-IP devices"
|
|||
description:
|
||||
- "Collect facts from F5 BIG-IP devices via iControl SOAP API"
|
||||
version_added: "1.6"
|
||||
author: Matt Hite
|
||||
author: '"Matt Hite (@mhite)" <mhite@hotmail.com>'
|
||||
notes:
|
||||
- "Requires BIG-IP software version >= 11.4"
|
||||
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
|
||||
|
@ -1684,6 +1684,8 @@ def main():
|
|||
module.exit_json(**result)
|
||||
|
||||
# include magic from lib/ansible/module_common.py
|
||||
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
|
||||
main()
|
||||
from ansible.module_utils.basic import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
|
|
|
@ -27,7 +27,7 @@ short_description: "Manages F5 BIG-IP LTM http monitors"
|
|||
description:
|
||||
- "Manages F5 BIG-IP LTM monitors via iControl SOAP API"
|
||||
version_added: "1.4"
|
||||
author: Serge van Ginderachter
|
||||
author: '"Serge van Ginderachter (@srvg)" <serge@vanginderachter.be>'
|
||||
notes:
|
||||
- "Requires BIG-IP software version >= 11"
|
||||
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM tcp monitors"
|
|||
description:
|
||||
- "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API"
|
||||
version_added: "1.4"
|
||||
author: Serge van Ginderachter
|
||||
author: '"Serge van Ginderachter (@srvg)" <serge@vanginderachter.be>'
|
||||
notes:
|
||||
- "Requires BIG-IP software version >= 11"
|
||||
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM nodes"
|
|||
description:
|
||||
- "Manages F5 BIG-IP LTM nodes via iControl SOAP API"
|
||||
version_added: "1.4"
|
||||
author: Matt Hite
|
||||
author: '"Matt Hite (@mhite)" <mhite@hotmail.com>'
|
||||
notes:
|
||||
- "Requires BIG-IP software version >= 11"
|
||||
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
|
||||
|
@ -67,7 +67,23 @@ options:
|
|||
- Pool member state
|
||||
required: true
|
||||
default: present
|
||||
choices: ['present', 'absent', 'enabled', 'disabled']
|
||||
choices: ['present', 'absent']
|
||||
aliases: []
|
||||
session_state:
|
||||
description:
|
||||
- Set new session availability status for node
|
||||
version_added: "1.9"
|
||||
required: false
|
||||
default: null
|
||||
choices: ['enabled', 'disabled']
|
||||
aliases: []
|
||||
monitor_state:
|
||||
description:
|
||||
- Set monitor availability status for node
|
||||
version_added: "1.9"
|
||||
required: false
|
||||
default: null
|
||||
choices: ['enabled', 'disabled']
|
||||
aliases: []
|
||||
partition:
|
||||
description:
|
||||
|
@ -78,7 +94,7 @@ options:
|
|||
aliases: []
|
||||
name:
|
||||
description:
|
||||
- "Node name. Required when state=enabled/disabled"
|
||||
- "Node name"
|
||||
required: false
|
||||
default: null
|
||||
choices: []
|
||||
|
@ -145,10 +161,30 @@ EXAMPLES = '''
|
|||
partition=matthite
|
||||
name="{{ ansible_default_ipv4["address"] }}"
|
||||
|
||||
- name: Disable node
|
||||
bigip_node: server=lb.mydomain.com user=admin password=mysecret
|
||||
state=disabled name=mynodename
|
||||
delegate_to: localhost
|
||||
# The BIG-IP GUI doesn't map directly to the API calls for "Node ->
|
||||
# General Properties -> State". The following states map to API monitor
|
||||
# and session states.
|
||||
#
|
||||
# Enabled (all traffic allowed):
|
||||
# monitor_state=enabled, session_state=enabled
|
||||
# Disabled (only persistent or active connections allowed):
|
||||
# monitor_state=enabled, session_state=disabled
|
||||
# Forced offline (only active connections allowed):
|
||||
# monitor_state=disabled, session_state=disabled
|
||||
#
|
||||
# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down
|
||||
|
||||
- name: Force node offline
|
||||
local_action: >
|
||||
bigip_node
|
||||
server=lb.mydomain.com
|
||||
user=admin
|
||||
password=mysecret
|
||||
state=present
|
||||
session_state=disabled
|
||||
monitor_state=disabled
|
||||
partition=matthite
|
||||
name="{{ ansible_default_ipv4["address"] }}"
|
||||
|
||||
'''
|
||||
|
||||
|
@ -163,13 +199,6 @@ else:
|
|||
# bigip_node module specific
|
||||
#
|
||||
|
||||
# map of state values
|
||||
STATES={'enabled': 'STATE_ENABLED',
|
||||
'disabled': 'STATE_DISABLED'}
|
||||
STATUSES={'enabled': 'SESSION_STATUS_ENABLED',
|
||||
'disabled': 'SESSION_STATUS_DISABLED',
|
||||
'offline': 'SESSION_STATUS_FORCED_DISABLED'}
|
||||
|
||||
def bigip_api(bigip, user, password):
|
||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
|
||||
return api
|
||||
|
@ -227,29 +256,31 @@ def delete_node_address(api, address):
|
|||
|
||||
def set_node_description(api, name, description):
|
||||
api.LocalLB.NodeAddressV2.set_description(nodes=[name],
|
||||
descriptions=[description])
|
||||
descriptions=[description])
|
||||
|
||||
def get_node_description(api, name):
|
||||
return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0]
|
||||
|
||||
def set_node_disabled(api, name):
|
||||
set_node_session_enabled_state(api, name, STATES['disabled'])
|
||||
result = True
|
||||
desc = ""
|
||||
return (result, desc)
|
||||
|
||||
def set_node_enabled(api, name):
|
||||
set_node_session_enabled_state(api, name, STATES['enabled'])
|
||||
result = True
|
||||
desc = ""
|
||||
return (result, desc)
|
||||
|
||||
def set_node_session_enabled_state(api, name, state):
|
||||
def set_node_session_enabled_state(api, name, session_state):
|
||||
session_state = "STATE_%s" % session_state.strip().upper()
|
||||
api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name],
|
||||
states=[state])
|
||||
states=[session_state])
|
||||
|
||||
def get_node_session_status(api, name):
|
||||
return api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0]
|
||||
result = api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0]
|
||||
result = result.split("SESSION_STATUS_")[-1].lower()
|
||||
return result
|
||||
|
||||
def set_node_monitor_state(api, name, monitor_state):
|
||||
monitor_state = "STATE_%s" % monitor_state.strip().upper()
|
||||
api.LocalLB.NodeAddressV2.set_monitor_state(nodes=[name],
|
||||
states=[monitor_state])
|
||||
|
||||
def get_node_monitor_status(api, name):
|
||||
result = api.LocalLB.NodeAddressV2.get_monitor_status(nodes=[name])[0]
|
||||
result = result.split("MONITOR_STATUS_")[-1].lower()
|
||||
return result
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
|
@ -258,8 +289,9 @@ def main():
|
|||
user = dict(type='str', required=True),
|
||||
password = dict(type='str', required=True),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
state = dict(type='str', default='present',
|
||||
choices=['present', 'absent', 'disabled', 'enabled']),
|
||||
state = dict(type='str', default='present', choices=['present', 'absent']),
|
||||
session_state = dict(type='str', choices=['enabled', 'disabled']),
|
||||
monitor_state = dict(type='str', choices=['enabled', 'disabled']),
|
||||
partition = dict(type='str', default='Common'),
|
||||
name = dict(type='str', required=True),
|
||||
host = dict(type='str', aliases=['address', 'ip']),
|
||||
|
@ -276,6 +308,8 @@ def main():
|
|||
password = module.params['password']
|
||||
validate_certs = module.params['validate_certs']
|
||||
state = module.params['state']
|
||||
session_state = module.params['session_state']
|
||||
monitor_state = module.params['monitor_state']
|
||||
partition = module.params['partition']
|
||||
host = module.params['host']
|
||||
name = module.params['name']
|
||||
|
@ -315,6 +349,13 @@ def main():
|
|||
module.fail_json(msg="unable to create: %s" % desc)
|
||||
else:
|
||||
result = {'changed': True}
|
||||
if session_state is not None:
|
||||
set_node_session_enabled_state(api, address,
|
||||
session_state)
|
||||
result = {'changed': True}
|
||||
if monitor_state is not None:
|
||||
set_node_monitor_state(api, address, monitor_state)
|
||||
result = {'changed': True}
|
||||
if description is not None:
|
||||
set_node_description(api, address, description)
|
||||
result = {'changed': True}
|
||||
|
@ -328,38 +369,40 @@ def main():
|
|||
module.fail_json(msg="Changing the node address is " \
|
||||
"not supported by the API; " \
|
||||
"delete and recreate the node.")
|
||||
if session_state is not None:
|
||||
session_status = get_node_session_status(api, address)
|
||||
if session_state == 'enabled' and \
|
||||
session_status == 'forced_disabled':
|
||||
if not module.check_mode:
|
||||
set_node_session_enabled_state(api, address,
|
||||
session_state)
|
||||
result = {'changed': True}
|
||||
elif session_state == 'disabled' and \
|
||||
session_status != 'force_disabled':
|
||||
if not module.check_mode:
|
||||
set_node_session_enabled_state(api, address,
|
||||
session_state)
|
||||
result = {'changed': True}
|
||||
if monitor_state is not None:
|
||||
monitor_status = get_node_monitor_status(api, address)
|
||||
if monitor_state == 'enabled' and \
|
||||
monitor_status == 'forced_down':
|
||||
if not module.check_mode:
|
||||
set_node_monitor_state(api, address,
|
||||
monitor_state)
|
||||
result = {'changed': True}
|
||||
elif monitor_state == 'disabled' and \
|
||||
monitor_status != 'forced_down':
|
||||
if not module.check_mode:
|
||||
set_node_monitor_state(api, address,
|
||||
monitor_state)
|
||||
result = {'changed': True}
|
||||
if description is not None:
|
||||
if get_node_description(api, address) != description:
|
||||
if not module.check_mode:
|
||||
set_node_description(api, address, description)
|
||||
result = {'changed': True}
|
||||
|
||||
elif state in ('disabled', 'enabled'):
|
||||
if name is None:
|
||||
module.fail_json(msg="name parameter required when " \
|
||||
"state=enabled/disabled")
|
||||
if not module.check_mode:
|
||||
if not node_exists(api, name):
|
||||
module.fail_json(msg="node does not exist")
|
||||
status = get_node_session_status(api, name)
|
||||
if state == 'disabled':
|
||||
if status not in (STATUSES['disabled'], STATUSES['offline']):
|
||||
disabled, desc = set_node_disabled(api, name)
|
||||
if not disabled:
|
||||
module.fail_json(msg="unable to disable: %s" % desc)
|
||||
else:
|
||||
result = {'changed': True}
|
||||
else:
|
||||
if status != STATUSES['enabled']:
|
||||
enabled, desc = set_node_enabled(api, name)
|
||||
if not enabled:
|
||||
module.fail_json(msg="unable to enable: %s" % desc)
|
||||
else:
|
||||
result = {'changed': True}
|
||||
else:
|
||||
# check-mode return value
|
||||
result = {'changed': True}
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg="received exception: %s" % e)
|
||||
|
||||
|
|
|
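The comment block in the EXAMPLES above is the key to the bigip_node rework: the three GUI node states are expressed as combinations of session_state and monitor_state. A small sketch of that mapping, purely for illustration (the dict name is made up), follows.

# Illustrative mapping of BIG-IP GUI node states to the module's two parameters,
# taken from the comment block in the EXAMPLES section above.
GUI_STATE_TO_PARAMS = {
    "enabled":        {"session_state": "enabled",  "monitor_state": "enabled"},
    "disabled":       {"session_state": "disabled", "monitor_state": "enabled"},
    "forced_offline": {"session_state": "disabled", "monitor_state": "disabled"},
}

print(GUI_STATE_TO_PARAMS["forced_offline"])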
@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM pools"
|
|||
description:
|
||||
- "Manages F5 BIG-IP LTM pools via iControl SOAP API"
|
||||
version_added: "1.2"
|
||||
author: Matt Hite
|
||||
author: '"Matt Hite (@mhite)" <mhite@hotmail.com>'
|
||||
notes:
|
||||
- "Requires BIG-IP software version >= 11"
|
||||
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM pool members"
|
|||
description:
|
||||
- "Manages F5 BIG-IP LTM pool members via iControl SOAP API"
|
||||
version_added: "1.4"
|
||||
author: Matt Hite
|
||||
author: '"Matt Hite (@mhite)" <mhite@hotmail.com>'
|
||||
notes:
|
||||
- "Requires BIG-IP software version >= 11"
|
||||
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
|
||||
|
@ -39,23 +39,14 @@ options:
|
|||
description:
|
||||
- BIG-IP host
|
||||
required: true
|
||||
default: null
|
||||
choices: []
|
||||
aliases: []
|
||||
user:
|
||||
description:
|
||||
- BIG-IP username
|
||||
required: true
|
||||
default: null
|
||||
choices: []
|
||||
aliases: []
|
||||
password:
|
||||
description:
|
||||
- BIG-IP password
|
||||
required: true
|
||||
default: null
|
||||
choices: []
|
||||
aliases: []
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated. This should only be used
|
||||
|
@ -70,63 +61,58 @@ options:
|
|||
required: true
|
||||
default: present
|
||||
choices: ['present', 'absent']
|
||||
aliases: []
|
||||
session_state:
|
||||
description:
|
||||
- Set new session availability status for pool member
|
||||
version_added: "2.0"
|
||||
required: false
|
||||
default: null
|
||||
choices: ['enabled', 'disabled']
|
||||
monitor_state:
|
||||
description:
|
||||
- Set monitor availability status for pool member
|
||||
version_added: "2.0"
|
||||
required: false
|
||||
default: null
|
||||
choices: ['enabled', 'disabled']
|
||||
pool:
|
||||
description:
|
||||
- Pool name. This pool must exist.
|
||||
required: true
|
||||
default: null
|
||||
choices: []
|
||||
aliases: []
|
||||
partition:
|
||||
description:
|
||||
- Partition
|
||||
required: false
|
||||
default: 'Common'
|
||||
choices: []
|
||||
aliases: []
|
||||
host:
|
||||
description:
|
||||
- Pool member IP
|
||||
required: true
|
||||
default: null
|
||||
choices: []
|
||||
aliases: ['address', 'name']
|
||||
port:
|
||||
description:
|
||||
- Pool member port
|
||||
required: true
|
||||
default: null
|
||||
choices: []
|
||||
aliases: []
|
||||
connection_limit:
|
||||
description:
|
||||
- Pool member connection limit. Setting this to 0 disables the limit.
|
||||
required: false
|
||||
default: null
|
||||
choices: []
|
||||
aliases: []
|
||||
description:
|
||||
description:
|
||||
- Pool member description
|
||||
required: false
|
||||
default: null
|
||||
choices: []
|
||||
aliases: []
|
||||
rate_limit:
|
||||
description:
|
||||
- Pool member rate limit (connections-per-second). Setting this to 0 disables the limit.
|
||||
required: false
|
||||
default: null
|
||||
choices: []
|
||||
aliases: []
|
||||
ratio:
|
||||
description:
|
||||
- Pool member ratio weight. Valid values range from 1 through 100. New pool members -- unless overriden with this value -- default to 1.
|
||||
required: false
|
||||
default: null
|
||||
choices: []
|
||||
aliases: []
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -180,6 +166,34 @@ EXAMPLES = '''
|
|||
host="{{ ansible_default_ipv4["address"] }}"
|
||||
port=80
|
||||
|
||||
|
||||
# The BIG-IP GUI doesn't map directly to the API calls for "Pool ->
|
||||
# Members -> State". The following states map to API monitor
|
||||
# and session states.
|
||||
#
|
||||
# Enabled (all traffic allowed):
|
||||
# monitor_state=enabled, session_state=enabled
|
||||
# Disabled (only persistent or active connections allowed):
|
||||
# monitor_state=enabled, session_state=disabled
|
||||
# Forced offline (only active connections allowed):
|
||||
# monitor_state=disabled, session_state=disabled
|
||||
#
|
||||
# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down
|
||||
|
||||
- name: Force pool member offline
|
||||
local_action: >
|
||||
bigip_pool_member
|
||||
server=lb.mydomain.com
|
||||
user=admin
|
||||
password=mysecret
|
||||
state=present
|
||||
session_state=disabled
|
||||
monitor_state=disabled
|
||||
pool=matthite-pool
|
||||
partition=matthite
|
||||
host="{{ ansible_default_ipv4["address"] }}"
|
||||
port=80
|
||||
|
||||
'''
|
||||
|
||||
try:
|
||||
|
@ -290,6 +304,28 @@ def set_ratio(api, pool, address, port, ratio):
|
|||
members = [{'address': address, 'port': port}]
|
||||
api.LocalLB.Pool.set_member_ratio(pool_names=[pool], members=[members], ratios=[[ratio]])
|
||||
|
||||
def set_member_session_enabled_state(api, pool, address, port, session_state):
|
||||
members = [{'address': address, 'port': port}]
|
||||
session_state = ["STATE_%s" % session_state.strip().upper()]
|
||||
api.LocalLB.Pool.set_member_session_enabled_state(pool_names=[pool], members=[members], session_states=[session_state])
|
||||
|
||||
def get_member_session_status(api, pool, address, port):
|
||||
members = [{'address': address, 'port': port}]
|
||||
result = api.LocalLB.Pool.get_member_session_status(pool_names=[pool], members=[members])[0][0]
|
||||
result = result.split("SESSION_STATUS_")[-1].lower()
|
||||
return result
|
||||
|
||||
def set_member_monitor_state(api, pool, address, port, monitor_state):
|
||||
members = [{'address': address, 'port': port}]
|
||||
monitor_state = ["STATE_%s" % monitor_state.strip().upper()]
|
||||
api.LocalLB.Pool.set_member_monitor_state(pool_names=[pool], members=[members], monitor_states=[monitor_state])
|
||||
|
||||
def get_member_monitor_status(api, pool, address, port):
|
||||
members = [{'address': address, 'port': port}]
|
||||
result = api.LocalLB.Pool.get_member_monitor_status(pool_names=[pool], members=[members])[0][0]
|
||||
result = result.split("MONITOR_STATUS_")[-1].lower()
|
||||
return result
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
|
@ -298,6 +334,8 @@ def main():
|
|||
password = dict(type='str', required=True),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
state = dict(type='str', default='present', choices=['present', 'absent']),
|
||||
session_state = dict(type='str', choices=['enabled', 'disabled']),
|
||||
monitor_state = dict(type='str', choices=['enabled', 'disabled']),
|
||||
pool = dict(type='str', required=True),
|
||||
partition = dict(type='str', default='Common'),
|
||||
host = dict(type='str', required=True, aliases=['address', 'name']),
|
||||
|
@ -318,6 +356,8 @@ def main():
|
|||
password = module.params['password']
|
||||
validate_certs = module.params['validate_certs']
|
||||
state = module.params['state']
|
||||
session_state = module.params['session_state']
|
||||
monitor_state = module.params['monitor_state']
|
||||
partition = module.params['partition']
|
||||
pool = "/%s/%s" % (partition, module.params['pool'])
|
||||
connection_limit = module.params['connection_limit']
|
||||
|
@ -366,6 +406,10 @@ def main():
|
|||
set_rate_limit(api, pool, address, port, rate_limit)
|
||||
if ratio is not None:
|
||||
set_ratio(api, pool, address, port, ratio)
|
||||
if session_state is not None:
|
||||
set_member_session_enabled_state(api, pool, address, port, session_state)
|
||||
if monitor_state is not None:
|
||||
set_member_monitor_state(api, pool, address, port, monitor_state)
|
||||
result = {'changed': True}
|
||||
else:
|
||||
# pool member exists -- potentially modify attributes
|
||||
|
@ -385,6 +429,26 @@ def main():
|
|||
if not module.check_mode:
|
||||
set_ratio(api, pool, address, port, ratio)
|
||||
result = {'changed': True}
|
||||
if session_state is not None:
|
||||
session_status = get_member_session_status(api, pool, address, port)
|
||||
if session_state == 'enabled' and session_status == 'forced_disabled':
|
||||
if not module.check_mode:
|
||||
set_member_session_enabled_state(api, pool, address, port, session_state)
|
||||
result = {'changed': True}
|
||||
elif session_state == 'disabled' and session_status != 'force_disabled':
|
||||
if not module.check_mode:
|
||||
set_member_session_enabled_state(api, pool, address, port, session_state)
|
||||
result = {'changed': True}
|
||||
if monitor_state is not None:
|
||||
monitor_status = get_member_monitor_status(api, pool, address, port)
|
||||
if monitor_state == 'enabled' and monitor_status == 'forced_down':
|
||||
if not module.check_mode:
|
||||
set_member_monitor_state(api, pool, address, port, monitor_state)
|
||||
result = {'changed': True}
|
||||
elif monitor_state == 'disabled' and monitor_status != 'forced_down':
|
||||
if not module.check_mode:
|
||||
set_member_monitor_state(api, pool, address, port, monitor_state)
|
||||
result = {'changed': True}
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg="received exception: %s" % e)
|
||||
|
|
|
@ -91,7 +91,7 @@ examples:
|
|||
# enable server in 'www' backend pool with change server(s) weight
|
||||
- haproxy: state=enabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock weight=10 backend=www
|
||||
|
||||
author: Ravi Bhure <ravibhure@gmail.com>
|
||||
author: "Ravi Bhure (@ravibhure)" <ravibhure@gmail.com>
|
||||
'''
|
||||
|
||||
import socket
|
||||
|
|
2
network/lldp.py
Executable file → Normal file
2
network/lldp.py
Executable file → Normal file
|
@ -24,7 +24,7 @@ short_description: get details reported by lldp
|
|||
description:
|
||||
- Reads data out of lldpctl
|
||||
options: {}
|
||||
author: Andy Hill
|
||||
author: "Andy Hill (@andyhky)"
|
||||
notes:
|
||||
- Requires lldpd running and lldp enabled on switches
|
||||
'''
|
||||
|
|
|
@ -22,7 +22,7 @@ DOCUMENTATION = '''
---
module: openvswitch_bridge
version_added: 1.4
author: David Stygstra
author: '"David Stygstra (@stygstra)" <david.stygstra@gmail.com>'
short_description: Manage Open vSwitch bridges
requirements: [ ovs-vsctl ]
description:
@ -22,7 +22,7 @@ DOCUMENTATION = '''
---
module: openvswitch_port
version_added: 1.4
author: David Stygstra
author: '"David Stygstra (@stygstra)" <david.stygstra@gmail.com>'
short_description: Manage Open vSwitch ports
requirements: [ ovs-vsctl ]
description:
2
network/snmp_facts.py
Executable file → Normal file

@ -20,7 +20,7 @@ DOCUMENTATION = '''
---
module: snmp_facts
version_added: "1.9"
author: Patrick Ogenstad (@networklore)
author: "Patrick Ogenstad (@ogenstad)"
short_description: Retrive facts for a device using SNMP.
description:
    - Retrieve facts for a device using SNMP, the facts will be
@ -43,7 +43,7 @@ options:

# informational: requirements for nodes
requirements: [ urllib2, cgi ]
author: Adam Garside <adam.garside@gmail.com>
author: '"Adam Garside (@fabulops)" <adam.garside@gmail.com>'
'''

EXAMPLES = '''
@ -22,7 +22,7 @@ DOCUMENTATION = '''
---
module: flowdock
version_added: "1.2"
author: Matt Coddington
author: '"Matt Coddington (@mcodd)" <coddington@gmail.com>'
short_description: Send a message to a flowdock
description:
  - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
@ -25,11 +25,11 @@ options:
    required: true
  url:
    description:
      - Service URL for the web client
      - Service URL for the web client
    required: false
  icon_url:
    description:
      - Icon for the service
      - Icon for the service
    required: false
  validate_certs:
    description:
@ -39,7 +39,7 @@ options:
    default: 'yes'
    choices: ['yes', 'no']
    version_added: 1.5.1
author: Jonas Pfenniger <zimbatm@zimbatm.com>
author: '"Jonas Pfenniger (@zimbatm)" <zimbatm@zimbatm.com>'
'''

EXAMPLES = '''
@ -58,13 +58,13 @@ options:
    description:
      - API url if using a self-hosted hipchat server
    required: false
    default: 'https://api.hipchat.com/v1/rooms/message'
    default: 'https://api.hipchat.com/v1'
    version_added: 1.6.0


# informational: requirements for nodes
requirements: [ urllib, urllib2 ]
author: WAKAYAMA Shirou
author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul (@pb8226)"
'''

EXAMPLES = '''
@ -75,11 +75,17 @@ EXAMPLES = '''
# HipChat module specific support methods.
#

MSG_URI = "https://api.hipchat.com/v1/rooms/message"
DEFAULT_URI = "https://api.hipchat.com/v1"

def send_msg(module, token, room, msg_from, msg, msg_format='text',
             color='yellow', notify=False, api=MSG_URI):
    '''sending message to hipchat'''
MSG_URI_V1 = "/rooms/message"

MSG_URI_V2 = "/room/{id_or_name}/message"
NOTIFY_URI_V2 = "/room/{id_or_name}/notification"

def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
                color='yellow', notify=False, api=MSG_URI_V1):
    '''sending message to hipchat v1 server'''
    print "Sending message to v1 server"

    params = {}
    params['room_id'] = room
@ -94,8 +100,13 @@ def send_msg(module, token, room, msg_from, msg, msg_format='text',
    else:
        params['notify'] = 0

    url = api + "?auth_token=%s" % (token)
    url = api + MSG_URI_V1 + "?auth_token=%s" % (token)
    data = urllib.urlencode(params)

    if module.check_mode:
        # In check mode, exit before actually sending the message
        module.exit_json(changed=False)

    response, info = fetch_url(module, url, data=data)
    if info['status'] == 200:
        return response.read()
@ -103,6 +114,37 @@ def send_msg(module, token, room, msg_from, msg, msg_format='text',
        module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))


def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
                color='yellow', notify=False, api=MSG_URI_V2):
    '''sending message to hipchat v2 server'''
    print "Sending message to v2 server"

    headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'}

    body = dict()
    body['message'] = msg
    body['color'] = color
    body['message_format'] = msg_format

    if notify:
        POST_URL = api + NOTIFY_URI_V2
    else:
        POST_URL = api + MSG_URI_V2

    url = POST_URL.replace('{id_or_name}',room)
    data = json.dumps(body)

    if module.check_mode:
        # In check mode, exit before actually sending the message
        module.exit_json(changed=False)

    response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
    if info['status'] == 200:
        return response.read()
    else:
        module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))


# ===========================================
# Module execution.
#
@ -119,8 +161,8 @@ def main():
                               "purple", "gray", "random"]),
            msg_format=dict(default="text", choices=["text", "html"]),
            notify=dict(default=True, type='bool'),
            validate_certs = dict(default='yes', type='bool'),
            api = dict(default=MSG_URI),
            validate_certs=dict(default='yes', type='bool'),
            api=dict(default=DEFAULT_URI),
        ),
        supports_check_mode=True
    )
@ -135,7 +177,10 @@ def main():
    api = module.params["api"]

    try:
        send_msg(module, token, room, msg_from, msg, msg_format, color, notify, api)
        if api.find('/v2') != -1:
            send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api)
        else:
            send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api)
    except Exception, e:
        module.fail_json(msg="unable to send msg: %s" % e)
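The routing above keys entirely off the api module parameter: any URL containing '/v2' goes to send_msg_v2, everything else falls back to the v1 sender, and the version-specific path is appended to that base URL. The sketch below only illustrates that URL selection; pick_endpoint and the sample self-hosted URL are invented for the example, while the URI constants are the ones defined earlier in the diff.

# Standalone illustration of how the api parameter selects the HipChat API version
# and the resulting endpoint. pick_endpoint() and the example URLs are hypothetical;
# the constants mirror the ones introduced in the diff above.
DEFAULT_URI = "https://api.hipchat.com/v1"
MSG_URI_V1 = "/rooms/message"
MSG_URI_V2 = "/room/{id_or_name}/message"
NOTIFY_URI_V2 = "/room/{id_or_name}/notification"

def pick_endpoint(api, room, notify=False):
    if api.find('/v2') != -1:
        path = NOTIFY_URI_V2 if notify else MSG_URI_V2
        return 'v2', (api + path).replace('{id_or_name}', room)
    return 'v1', api + MSG_URI_V1

print(pick_endpoint(DEFAULT_URI, 'ops'))
# ('v1', 'https://api.hipchat.com/v1/rooms/message')
print(pick_endpoint('https://hipchat.example.com/v2', 'ops', notify=True))
# ('v2', 'https://hipchat.example.com/v2/room/ops/notification')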
@ -80,7 +80,9 @@ options:

# informational: requirements for nodes
requirements: [ socket ]
author: Jan-Piet Mens, Matt Martz
author:
  - '"Jan-Piet Mens (@jpmens)"'
  - '"Matt Martz (@sivel)"'
'''

EXAMPLES = '''
@ -42,7 +42,7 @@ options:

# informational: requirements for nodes
requirements: [ xmpp ]
author: Brian Coca
author: "Brian Coca (@bcoca)"
'''

EXAMPLES = '''
@ -20,7 +20,7 @@

DOCUMENTATION = """
---
author: Dag Wieers
author: '"Dag Wieers (@dagwieers)" <dag@wieers.com>'
module: mail
short_description: Send an email
description:
@ -138,6 +138,13 @@ EXAMPLES = '''
    attach="/etc/group /tmp/pavatar2.png"
    headers=Reply-To=john@example.com|X-Special="Something or other"
    charset=utf8
# Sending an e-mail using the remote machine, not the Ansible controller node
- mail:
    host='localhost'
    port=25
    to="John Smith <john.smith@example.com>"
    subject='Ansible-report'
    body='System {{ ansible_hostname }} has been successfully provisioned.'
'''

import os
@ -81,7 +81,7 @@ requirements: [ mosquitto ]
notes:
 - This module requires a connection to an MQTT broker such as Mosquitto
   U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.python.org/pypi/paho-mqtt)).
author: Jan-Piet Mens
author: "Jan-Piet Mens (@jpmens)"
'''

EXAMPLES = '''
@ -24,7 +24,7 @@ short_description: Send a SMS via nexmo
description:
    - Send a SMS message via nexmo
version_added: 1.6
author: Matt Martz
author: '"Matt Martz (@sivel)" <matt@sivel.net>'
options:
  api_key:
    description:
@ -37,7 +37,9 @@ options:
      What voice to use
    required: false
requirements: [ say ]
author: Michael DeHaan
author:
    - "Ansible Core Team"
    - "Michael DeHaan (@mpdehaan)"
'''

EXAMPLES = '''
182
notification/pushbullet.py
Normal file

@ -0,0 +1,182 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
author: "Willy Barro (@willybarro)"
requirements: [ pushbullet.py ]
module: pushbullet
short_description: Sends notifications to Pushbullet
description:
     - This module sends push notifications via Pushbullet to channels or devices.
version_added: "2.0"
options:
    api_key:
        description:
            - Push bullet API token
        required: true
    channel:
        description:
            - The channel TAG you wish to broadcast a push notification,
              as seen on the "My Channels" > "Edit your channel" at
              Pushbullet page.
        required: false
        default: null
    device:
        description:
            - The device NAME you wish to send a push notification,
              as seen on the Pushbullet main page.
        required: false
        default: null
    push_type:
        description:
            - Thing you wish to push.
        required: false
        default: note
        choices: [ "note", "link" ]
    title:
        description:
            - Title of the notification.
        required: true
    body:
        description:
            - Body of the notification, e.g. Details of the fault you're alerting.
        required: false

notes:
    - Requires pushbullet.py Python package on the remote host.
      You can install it via pip with ($ pip install pushbullet.py).
      See U(https://github.com/randomchars/pushbullet.py)
'''
EXAMPLES = '''
# Sends a push notification to a device
- pushbullet:
    api_key: "ABC123abc123ABC123abc123ABC123ab"
    device: "Chrome"
    title: "You may see this on Google Chrome"

# Sends a link to a device
- pushbullet:
    api_key: "ABC123abc123ABC123abc123ABC123ab"
    device: "Chrome"
    push_type: "link"
    title: "Ansible Documentation"
    body: "http://docs.ansible.com/"

# Sends a push notification to a channel
- pushbullet:
    api_key: "ABC123abc123ABC123abc123ABC123ab"
    channel: "my-awesome-channel"
    title: "Broadcasting a message to the #my-awesome-channel folks"

# Sends a push notification with title and body to a channel
- pushbullet:
    api_key: "ABC123abc123ABC123abc123ABC123ab"
    channel: "my-awesome-channel"
    title: "ALERT! Signup service is down"
    body: "Error rate on signup service is over 90% for more than 2 minutes"
'''
try:
    from pushbullet import PushBullet
    from pushbullet.errors import InvalidKeyError, PushError
except ImportError:
    pushbullet_found = False
else:
    pushbullet_found = True

# ===========================================
# Main
#

def main():
    module = AnsibleModule(
        argument_spec = dict(
            api_key = dict(type='str', required=True),
            channel = dict(type='str', default=None),
            device = dict(type='str', default=None),
            push_type = dict(type='str', default="note", choices=['note', 'link']),
            title = dict(type='str', required=True),
            body = dict(type='str', default=None)
        ),
        mutually_exclusive = (
            ['channel', 'device'],
        ),
        supports_check_mode=True
    )

    api_key = module.params['api_key']
    channel = module.params['channel']
    device = module.params['device']
    push_type = module.params['push_type']
    title = module.params['title']
    body = module.params['body']

    if not pushbullet_found:
        module.fail_json(msg="Python 'pushbullet.py' module is required. Install via: $ pip install pushbullet.py")

    # Init pushbullet
    try:
        pb = PushBullet(api_key)
        target = None
    except InvalidKeyError:
        module.fail_json(msg="Invalid api_key")

    # Checks for channel/device
    if device is None and channel is None:
        module.fail_json(msg="You need to provide a channel or a device.")

    # Search for given device
    if device is not None:
        devices_by_nickname = {}
        for d in pb.devices:
            devices_by_nickname[d.nickname] = d

        if device in devices_by_nickname:
            target = devices_by_nickname[device]
        else:
            module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys())))

    # Search for given channel
    if channel is not None:
        channels_by_tag = {}
        for c in pb.channels:
            channels_by_tag[c.channel_tag] = c

        if channel in channels_by_tag:
            target = channels_by_tag[channel]
        else:
            module.fail_json(msg="Channel '%s' not found. Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys())))

    # If in check mode, exit saying that we succeeded
    if module.check_mode:
        module.exit_json(changed=False, msg="OK")

    # Send push notification
    try:
        target.push_note(title, body)
        module.exit_json(changed=False, msg="OK")
    except PushError as e:
        module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e))

    module.fail_json(msg="An unknown error has occurred")

# import module snippets
from ansible.module_utils.basic import *
main()
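For reference, the core of what the new module does with the pushbullet.py library is small: resolve a device by nickname (or a channel by tag) and call push_note() on it. The sketch below is a condensed, hypothetical illustration of that flow outside Ansible; the API key and device name are placeholder values, and the attribute and method names are the ones the module itself uses.

# Condensed sketch of the device-resolution and push flow used by the module above.
# The API key and device name are placeholders.
from pushbullet import PushBullet

pb = PushBullet("ABC123abc123ABC123abc123ABC123ab")
devices_by_nickname = {}
for d in pb.devices:
    devices_by_nickname[d.nickname] = d

target = devices_by_nickname.get("Chrome")
if target is not None:
    target.push_note("Deploy finished", "All hosts provisioned successfully.")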
@ -48,7 +48,7 @@ options:
    description: Message priority (see u(https://pushover.net) for details.)
    required: false

author: Jim Richardson
author: '"Jim Richardson (@weaselkeeper)" <weaselkeeper@gmail.com>'
'''

EXAMPLES = '''
@ -53,7 +53,7 @@ options:
      the desired subject for the email
    required: true

author: Matt Makai
author: '"Matt Makai (@makaimc)" <matthew.makai@gmail.com>'
'''

EXAMPLES = '''
@ -24,7 +24,7 @@ short_description: Send Slack notifications
description:
    - The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration
version_added: 1.6
author: Ramon de la Fuente <ramon@delafuente.nl>
author: '"Ramon de la Fuente (@ramondelafuente)" <ramon@delafuente.nl>'
options:
  domain:
    description:
@ -141,9 +141,9 @@ def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoj
    else:
        payload = dict(attachments=[dict(text=text, color=color)])
    if channel is not None:
        if (channel[0] == '#') or (channel[0] == '@')
        if (channel[0] == '#') or (channel[0] == '@'):
            payload['channel'] = channel
        else
        else:
            payload['channel'] = '#'+channel
    if username is not None:
        payload['username'] = username
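The fix above restores the missing colons, and it also pins down the channel handling: names already prefixed with '#' or '@' pass through untouched, anything else gets a '#' prepended. A tiny standalone version of that rule, with made-up channel names:

# Standalone version of the channel handling fixed above; sample names are made up.
def normalize_channel(channel):
    if (channel[0] == '#') or (channel[0] == '@'):
        return channel
    return '#' + channel

for name in ('#general', '@alice', 'deploys'):
    print("%s -> %s" % (name, normalize_channel(name)))
# '#general' and '@alice' pass through unchanged; 'deploys' becomes '#deploys'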
@ -24,7 +24,7 @@ short_description: Send Amazon Simple Notification Service (SNS) messages
description:
    - The M(sns) module sends notifications to a topic on your Amazon SNS account
version_added: 1.6
author: Michael J. Schultz <mjschultz@gmail.com>
author: '"Michael J. Schultz (@mjschultz)" <mjschultz@gmail.com>'
options:
  msg:
    description:
@ -58,7 +58,7 @@ options:
      (multimedia message) instead of a plain SMS
    required: false

author: Matt Makai
author: '"Matt Makai (@makaimc)" <matthew.makai@gmail.com>'
'''

EXAMPLES = '''
@ -26,7 +26,7 @@ options:
      - message body
    required: true
requirements: [ urllib, urllib2, json ]
author: Takashi Someda <someda@isenshi.com>
author: '"Takashi Someda (@tksmd)" <someda@isenshi.com>'
'''

EXAMPLES = '''
@ -25,7 +25,7 @@ short_description: Manage bower packages with bower
description:
    - Manage bower packages with bower
version_added: 1.9
author: Michael Warkentin
author: '"Michael Warkentin (@mwarkentin)" <mwarkentin@gmail.com>'
options:
  name:
    description:
@ -154,7 +154,7 @@ def main():

    name = module.params['name']
    offline = module.params['offline']
    path = module.params['path']
    path = os.path.expanduser(module.params['path'])
    state = module.params['state']
    version = module.params['version']
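The one-line bower change above exists because module.params['path'] can arrive as a '~'-relative path from the playbook; wrapping it in os.path.expanduser() turns it into a usable absolute path before bower is invoked. A quick illustration with a made-up path:

# Why the bower hunk wraps the path: '~' is only meaningful to the shell, not to
# os.chdir() or subprocess calls, so it has to be expanded first. The sample path
# is made up; the output depends on the current user's home directory.
import os.path

raw_path = '~/front-end'
print(os.path.expanduser(raw_path))   # e.g. /home/deploy/front-end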
@ -22,7 +22,7 @@
DOCUMENTATION = '''
---
module: composer
author: Dimitrios Tydeas Mengidis
author: '"Dimitrios Tydeas Mengidis (@dmtrs)" <tydeas.dr@gmail.com>'
short_description: Dependency Manager for PHP
version_added: "1.6"
description:
Some files were not shown because too many files have changed in this diff.