commit 2791edc496: resolving merge conflicts

162 changed files with 10149 additions and 676 deletions
REVIEWERS.md (new file, 160 lines)
@@ -0,0 +1,160 @@
New module reviewers
====================
The following list represents all current GitHub module reviewers. It is currently comprised of all Ansible module authors, past and present.

Two +1 votes by any of these module reviewers on a new module pull request will result in the inclusion of that module into Ansible Extras.

Active
======
"Adam Garside (@fabulops)"
"Adam Keech (@smadam813)"
"Adam Miller (@maxamillion)"
"Alex Coomans (@drcapulet)"
"Alexander Bulimov (@abulimov)"
"Alexander Saltanov (@sashka)"
"Alexander Winkler (@dermute)"
"Andrew de Quincey (@adq)"
"André Paramés (@andreparames)"
"Andy Hill (@andyhky)"
"Artūras `arturaz` Šlajus (@arturaz)"
"Augustus Kling (@AugustusKling)"
"BOURDEL Paul (@pb8226)"
"Balazs Pocze (@banyek)"
"Ben Whaley (@bwhaley)"
"Benno Joy (@bennojoy)"
"Bernhard Weitzhofer (@b6d)"
"Boyd Adamson (@brontitall)"
"Brad Olson (@bradobro)"
"Brian Coca (@bcoca)"
"Brice Burgess (@briceburg)"
"Bruce Pennypacker (@bpennypacker)"
"Carson Gee (@carsongee)"
"Chris Church (@cchurch)"
"Chris Hoffman (@chrishoffman)"
"Chris Long (@alcamie101)"
"Chris Schmidt (@chrisisbeef)"
"Christian Berendt (@berendt)"
"Christopher H. Laco (@claco)"
"Cristian van Ee (@DJMuggs)"
"Dag Wieers (@dagwieers)"
"Dane Summers (@dsummersl)"
"Daniel Jaouen (@danieljaouen)"
"Daniel Schep (@dschep)"
"Dariusz Owczarek (@dareko)"
"Darryl Stoflet (@dstoflet)"
"David CHANIAL (@davixx)"
"David Stygstra (@stygstra)"
"Derek Carter (@goozbach)"
"Dimitrios Tydeas Mengidis (@dmtrs)"
"Doug Luce (@dougluce)"
"Dylan Martin (@pileofrogs)"
"Elliott Foster (@elliotttf)"
"Eric Johnson (@erjohnso)"
"Evan Duffield (@scicoin-project)"
"Evan Kaufman (@EvanK)"
"Evgenii Terechkov (@evgkrsk)"
"Franck Cuny (@franckcuny)"
"Gareth Rushgrove (@garethr)"
"Hagai Kariti (@hkariti)"
"Hector Acosta (@hacosta)"
"Hiroaki Nakamura (@hnakamur)"
"Ivan Vanderbyl (@ivanvanderbyl)"
"Jakub Jirutka (@jirutka)"
"James Cammarata (@jimi-c)"
"James Laska (@jlaska)"
"James S. Martin (@jsmartin)"
"Jan-Piet Mens (@jpmens)"
"Jayson Vantuyl (@jvantuyl)"
"Jens Depuydt (@jensdepuydt)"
"Jeroen Hoekx (@jhoekx)"
"Jesse Keating (@j2sol)"
"Jim Dalton (@jsdalton)"
"Jim Richardson (@weaselkeeper)"
"Jimmy Tang (@jcftang)"
"Johan Wiren (@johanwiren)"
"John Dewey (@retr0h)"
"John Jarvis (@jarv)"
"John Whitbeck (@jwhitbeck)"
"Jon Hawkesworth (@jhawkesworth)"
"Jonas Pfenniger (@zimbatm)"
"Jonathan I. Davila (@defionscode)"
"Joseph Callen (@jcpowermac)"
"Kevin Carter (@cloudnull)"
"Lester Wade (@lwade)"
"Lorin Hochstein (@lorin)"
"Manuel Sousa (@manuel-sousa)"
"Mark Theunissen (@marktheunissen)"
"Matt Coddington (@mcodd)"
"Matt Hite (@mhite)"
"Matt Makai (@makaimc)"
"Matt Martz (@sivel)"
"Matt Wright (@mattupstate)"
"Matthew Vernon (@mcv21)"
"Matthew Williams (@mgwilliams)"
"Matthias Vogelgesang (@matze)"
"Max Riveiro (@kavu)"
"Michael Gregson (@mgregson)"
"Michael J. Schultz (@mjschultz)"
"Michael Warkentin (@mwarkentin)"
"Mischa Peters (@mischapeters)"
"Monty Taylor (@emonty)"
"Nandor Sivok (@dominis)"
"Nate Coraor (@natefoo)"
"Nate Kingsley (@nate-kingsley)"
"Nick Harring (@NickatEpic)"
"Patrick Callahan (@dirtyharrycallahan)"
"Patrick Ogenstad (@ogenstad)"
"Patrick Pelletier (@skinp)"
"Patrik Lundin (@eest)"
"Paul Durivage (@angstwad)"
"Pavel Antonov (@softzilla)"
"Pepe Barbe (@elventear)"
"Peter Mounce (@petemounce)"
"Peter Oliver (@mavit)"
"Peter Sprygada (@privateip)"
"Peter Tan (@tanpeter)"
"Philippe Makowski (@pmakowski)"
"Phillip Gentry, CX Inc (@pcgentry)"
"Quentin Stafford-Fraser (@quentinsf)"
"Ramon de la Fuente (@ramondelafuente)"
"Raul Melo (@melodous)"
"Ravi Bhure (@ravibhure)"
"René Moser (@resmo)"
"Richard Hoop (@rhoop)"
"Richard Isaacson (@risaacson)"
"Rick Mendes (@rickmendes)"
"Romeo Theriault (@romeotheriault)"
"Scott Anderson (@tastychutney)"
"Sebastian Kornehl (@skornehl)"
"Serge van Ginderachter (@srvg)"
"Sergei Antipov (@UnderGreen)"
"Seth Edwards (@sedward)"
"Silviu Dicu (@silviud)"
"Simon JAILLET (@jails)"
"Stephen Fromm (@sfromm)"
"Steve (@groks)"
"Steve Gargan (@sgargan)"
"Steve Smith (@tarka)"
"Takashi Someda (@tksmd)"
"Taneli Leppä (@rosmo)"
"Tim Bielawa (@tbielawa)"
"Tim Bielawa (@tbielawa)"
"Tim Mahoney (@timmahoney)"
"Timothy Appnel (@tima)"
"Tom Bamford (@tombamford)"
"Trond Hindenes (@trondhindenes)"
"Vincent Van der Kussen (@vincentvdk)"
"Vincent Viallet (@zbal)"
"WAKAYAMA Shirou (@shirou)"
"Will Thames (@willthames)"
"Willy Barro (@willybarro)"
"Xabier Larrakoetxea (@slok)"
"Yeukhon Wong (@yeukhon)"
"Zacharie Eakin (@zeekin)"
"berenddeboer (@berenddeboer)"
"bleader (@bleader)"
"curtis (@ccollicutt)"

Retired
=======
None yet :)
cloud/amazon/GUIDELINES.md (new file, 88 lines)
@@ -0,0 +1,88 @@
Guidelines for AWS modules
--------------------------

Naming your module
==================

Base the name of the module on the part of AWS that you actually use. (A good rule of thumb is to take whatever module you use with boto as a starting point.)

Don't further abbreviate names - if something is a well known abbreviation due to it being a major component of AWS, that's fine, but don't create new ones independently (e.g. VPC, ELB, etc. are fine).

Using boto
==========

Wrap the `import` statements in a try block and fail the module later on if the import fails:

```
try:
    import boto
    import boto.module.that.you.use
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False

<lots of code here>

def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            module_specific_parameter=dict(),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
```

Try and keep backward compatibility with relatively recent versions of boto. That means that if you want to implement some functionality that uses a new feature of boto, it should only fail when that feature is actually needed, with a message saying which version of boto is required.

Use feature testing (e.g. `hasattr('boto.module', 'shiny_new_method')`) to check whether boto supports a feature, rather than version checking.

e.g. from the `ec2` module:
```
if boto_supports_profile_name_arg(ec2):
    params['instance_profile_name'] = instance_profile_name
else:
    if instance_profile_name is not None:
        module.fail_json(
            msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
```

Connecting to AWS
=================

For EC2 you can just use

```
ec2 = ec2_connect(module)
```

For other modules, you should use `get_aws_connection_info` and then `connect_to_aws`. To connect to an example `xyz` service:

```
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
xyz = connect_to_aws(boto.xyz, region, **aws_connect_params)
```

The reason for using `get_aws_connection_info` and `connect_to_aws` (and even `ec2_connect` uses those under the hood) rather than doing it yourself is that they handle some of the more esoteric connection options, such as security tokens and boto profiles.
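Taken together, the guidelines above suggest a common skeleton for a non-EC2 AWS module. The sketch below is editorial and not part of the commit; the `xyz` service and the parameter names are placeholders, and the `ec2_argument_spec`/`connect_to_aws` helpers come from `ansible.module_utils.ec2` exactly as used throughout this commit.

```python
#!/usr/bin/python
# Illustrative skeleton only -- "xyz" and the parameters are placeholders.

try:
    import boto
    import boto.xyz          # hypothetical boto service module
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
    ))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # fail only after AnsibleModule exists, per the guideline above
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region must be specified")
    connection = connect_to_aws(boto.xyz, region, **aws_connect_params)

    # ... service-specific logic here ...
    module.exit_json(changed=False)

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
```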
@@ -21,7 +21,9 @@ short_description: manage CloudTrail creation and deletion
 description:
   - Creates or deletes CloudTrail configuration. Ensures logging is also enabled.
 version_added: "2.0"
-author: "Ted Timmons (@tedder)"
+author:
+    - "Ansible Core Team"
+    - "Ted Timmons"
 requirements:
     - "boto >= 2.21"
 options:
@@ -87,21 +89,17 @@ EXAMPLES = """
       s3_key_prefix='' region=us-east-1

 - name: remove cloudtrail
-  local_action: cloudtrail state=absent name=main region=us-east-1
+  local_action: cloudtrail state=disabled name=main region=us-east-1
 """

 import time
-import sys
 import os
-from collections import Counter

-boto_import_failed = False
+HAS_BOTO = False
 try:
     import boto
     import boto.cloudtrail
     from boto.regioninfo import RegionInfo
+    HAS_BOTO = True
 except ImportError:
-    boto_import_failed = True
+    HAS_BOTO = False

 class CloudTrailManager:
     """Handles cloudtrail configuration"""
@@ -152,9 +150,6 @@ class CloudTrailManager:

 def main():

-  if not has_libcloud:
-    module.fail_json(msg='boto is required.')
-
   argument_spec = ec2_argument_spec()
   argument_spec.update(dict(
       state={'required': True, 'choices': ['enabled', 'disabled'] },
@@ -166,6 +161,10 @@ def main():
   required_together = ( ['state', 's3_bucket_name'] )

   module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)

+  if not HAS_BOTO:
+    module.fail_json(msg='boto is required.')
+
   ec2_url, access_key, secret_key, region = get_ec2_creds(module)
   aws_connect_params = dict(aws_access_key_id=access_key,
                             aws_secret_access_key=secret_key)
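The hunks above move the CloudTrail module onto the HAS_BOTO convention from the guidelines and keep the credential tuple from `get_ec2_creds`. As a standalone, hedged illustration of how that tuple maps onto a boto 2 CloudTrail connection (the `connect_to_region` call and the credential values are assumptions for the sketch, not part of the diff):

```python
# Sketch only: translating get_ec2_creds()-style output into a boto 2
# CloudTrail connection. Credential values are placeholders.
import boto.cloudtrail

access_key = 'AKIA...EXAMPLE'   # stand-in for what get_ec2_creds(module) returns
secret_key = 'secret-example'
region = 'us-east-1'

aws_connect_params = dict(aws_access_key_id=access_key,
                          aws_secret_access_key=secret_key)
connection = boto.cloudtrail.connect_to_region(region, **aws_connect_params)
print connection.describe_trails()
```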
cloud/amazon/dynamodb_table.py (new file, 275 lines)
@@ -0,0 +1,275 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = """
---
module: dynamodb_table
short_description: Create, update or delete AWS Dynamo DB tables.
description:
  - Create or delete AWS Dynamo DB tables.
  - Can update the provisioned throughput on existing tables.
  - Returns the status of the specified table.
author: Alan Loi (@loia)
version_added: "2.0"
requirements:
  - "boto >= 2.13.2"
options:
  state:
    description:
      - Create or delete the table
    required: false
    choices: ['present', 'absent']
    default: 'present'
  name:
    description:
      - Name of the table.
    required: true
  hash_key_name:
    description:
      - Name of the hash key.
      - Required when C(state=present).
    required: false
  hash_key_type:
    description:
      - Type of the hash key.
    required: false
    choices: ['STRING', 'NUMBER', 'BINARY']
    default: 'STRING'
  range_key_name:
    description:
      - Name of the range key.
    required: false
  range_key_type:
    description:
      - Type of the range key.
    required: false
    choices: ['STRING', 'NUMBER', 'BINARY']
    default: 'STRING'
  read_capacity:
    description:
      - Read throughput capacity (units) to provision.
    required: false
    default: 1
  write_capacity:
    description:
      - Write throughput capacity (units) to provision.
    required: false
    default: 1
  region:
    description:
      - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
    required: false
    aliases: ['aws_region', 'ec2_region']

extends_documentation_fragment: aws
"""

EXAMPLES = '''
# Create dynamo table with hash and range primary key
- dynamodb_table:
    name: my-table
    region: us-east-1
    hash_key_name: id
    hash_key_type: STRING
    range_key_name: create_time
    range_key_type: NUMBER
    read_capacity: 2
    write_capacity: 2

# Update capacity on existing dynamo table
- dynamodb_table:
    name: my-table
    region: us-east-1
    read_capacity: 10
    write_capacity: 10

# Delete dynamo table
- dynamodb_table:
    name: my-table
    region: us-east-1
    state: absent
'''

RETURN = '''
table_status:
    description: The current status of the table.
    returned: success
    type: string
    sample: ACTIVE
'''

import traceback  # used when reporting boto errors below (missing from the original imports)

try:
    import boto
    import boto.dynamodb2
    from boto.dynamodb2.table import Table
    from boto.dynamodb2.fields import HashKey, RangeKey
    from boto.dynamodb2.types import STRING, NUMBER, BINARY
    from boto.exception import BotoServerError, JSONResponseError
    HAS_BOTO = True

except ImportError:
    HAS_BOTO = False


DYNAMO_TYPE_MAP = {
    'STRING': STRING,
    'NUMBER': NUMBER,
    'BINARY': BINARY
}


def create_or_update_dynamo_table(connection, module):
    table_name = module.params.get('name')
    hash_key_name = module.params.get('hash_key_name')
    hash_key_type = module.params.get('hash_key_type')
    range_key_name = module.params.get('range_key_name')
    range_key_type = module.params.get('range_key_type')
    read_capacity = module.params.get('read_capacity')
    write_capacity = module.params.get('write_capacity')

    schema = [
        HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type)),
        RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type))
    ]
    throughput = {
        'read': read_capacity,
        'write': write_capacity
    }

    result = dict(
        region=module.params.get('region'),
        table_name=table_name,
        hash_key_name=hash_key_name,
        hash_key_type=hash_key_type,
        range_key_name=range_key_name,
        range_key_type=range_key_type,
        read_capacity=read_capacity,
        write_capacity=write_capacity,
    )

    try:
        table = Table(table_name, connection=connection)

        if dynamo_table_exists(table):
            result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode)
        else:
            if not module.check_mode:
                Table.create(table_name, connection=connection, schema=schema, throughput=throughput)
            result['changed'] = True

        if not module.check_mode:
            result['table_status'] = table.describe()['Table']['TableStatus']

    except BotoServerError:
        result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)


def delete_dynamo_table(connection, module):
    table_name = module.params.get('name')

    result = dict(
        region=module.params.get('region'),
        table_name=table_name,
    )

    try:
        table = Table(table_name, connection=connection)

        if dynamo_table_exists(table):
            if not module.check_mode:
                table.delete()
            result['changed'] = True

        else:
            result['changed'] = False

    except BotoServerError:
        result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)


def dynamo_table_exists(table):
    try:
        table.describe()
        return True

    except JSONResponseError, e:
        if e.message and e.message.startswith('Requested resource not found'):
            return False
        else:
            raise e


def update_dynamo_table(table, throughput=None, check_mode=False):
    table.describe()  # populate table details

    if has_throughput_changed(table, throughput):
        if not check_mode:
            return table.update(throughput=throughput)
        else:
            return True

    return False


def has_throughput_changed(table, new_throughput):
    if not new_throughput:
        return False

    return new_throughput['read'] != table.throughput['read'] or \
           new_throughput['write'] != table.throughput['write']


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(required=True, type='str'),
        hash_key_name=dict(required=True, type='str'),
        hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        range_key_name=dict(type='str'),
        range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        read_capacity=dict(default=1, type='int'),
        write_capacity=dict(default=1, type='int'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)

    state = module.params.get('state')
    if state == 'present':
        create_or_update_dynamo_table(connection, module)
    elif state == 'absent':
        delete_dynamo_table(connection, module)


# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
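For reference, the schema and throughput dictionaries built by this module map directly onto boto.dynamodb2 calls. The standalone sketch below is editorial (table and key names are made up, credentials are assumed to come from the boto config or environment) and simply shows the same calls the module issues:

```python
# Sketch only: the boto.dynamodb2 calls behind dynamodb_table.
import boto.dynamodb2
from boto.dynamodb2.fields import HashKey, RangeKey
from boto.dynamodb2.table import Table
from boto.dynamodb2.types import STRING, NUMBER

conn = boto.dynamodb2.connect_to_region('us-east-1')   # credentials from boto config/env

schema = [
    HashKey('id', STRING),            # hash_key_name / hash_key_type
    RangeKey('create_time', NUMBER),  # range_key_name / range_key_type
]
throughput = {'read': 2, 'write': 2}  # read_capacity / write_capacity

table = Table.create('my-table', connection=conn, schema=schema, throughput=throughput)
print table.describe()['Table']['TableStatus']  # what the module returns as table_status
```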
cloud/amazon/ec2_ami_copy.py (new file, 208 lines)
@@ -0,0 +1,208 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: ec2_ami_copy
short_description: copies AMI between AWS regions, return new image id
description:
    - Copies AMI from a source region to a destination region. This module has a dependency on python-boto >= 2.5
version_added: "2.0"
options:
  source_region:
    description:
      - the source region that AMI should be copied from
    required: true
  region:
    description:
      - the destination region that AMI should be copied to
    required: true
    aliases: ['aws_region', 'ec2_region', 'dest_region']
  source_image_id:
    description:
      - the id of the image in source region that should be copied
    required: true
  name:
    description:
      - The name of the new image to copy
    required: false
    default: null
  description:
    description:
      - An optional human-readable string describing the contents and purpose of the new AMI.
    required: false
    default: null
  wait:
    description:
      - wait for the copied AMI to be in state 'available' before returning.
    required: false
    default: "no"
    choices: [ "yes", "no" ]
  wait_timeout:
    description:
      - how long before wait gives up, in seconds
    required: false
    default: 1200
  tags:
    description:
      - a hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}'
    required: false
    default: null

author: Amir Moulavi <amir.moulavi@gmail.com>
extends_documentation_fragment: aws
'''

EXAMPLES = '''
# Basic AMI Copy
- local_action:
    module: ec2_ami_copy
    source_region: eu-west-1
    dest_region: us-east-1
    source_image_id: ami-xxxxxxx
    name: SuperService-new-AMI
    description: latest patch
    tags: '{"Name":"SuperService-new-AMI", "type":"SuperService"}'
    wait: yes
  register: image_id
'''


import sys
import time

try:
    import boto
    import boto.ec2
    from boto.vpc import VPCConnection
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False


def copy_image(module, ec2):
    """
    Copies an AMI

    module : AnsibleModule object
    ec2: authenticated ec2 connection object
    """

    source_region = module.params.get('source_region')
    source_image_id = module.params.get('source_image_id')
    name = module.params.get('name')
    description = module.params.get('description')
    tags = module.params.get('tags')
    wait_timeout = int(module.params.get('wait_timeout'))
    wait = module.params.get('wait')

    try:
        params = {'source_region': source_region,
                  'source_image_id': source_image_id,
                  'name': name,
                  'description': description
                  }

        image_id = ec2.copy_image(**params).image_id
    except boto.exception.BotoServerError, e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    img = wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait)

    img = wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait)

    register_tags_if_any(module, ec2, tags, image_id)

    module.exit_json(msg="AMI copy operation complete", image_id=image_id, state=img.state, changed=True)


# register tags to the copied AMI in dest_region
def register_tags_if_any(module, ec2, tags, image_id):
    if tags:
        try:
            ec2.create_tags([image_id], tags)
        except Exception as e:
            module.fail_json(msg=str(e))


# wait here until the image is copied (i.e. the state becomes available)
def wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait):
    wait_timeout = time.time() + wait_timeout
    while wait and wait_timeout > time.time() and (img is None or img.state != 'available'):
        img = ec2.get_image(image_id)
        time.sleep(3)
    if wait and wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg="timed out waiting for image to be copied")
    return img


# wait until the image is recognized.
def wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait):
    for i in range(wait_timeout):
        try:
            return ec2.get_image(image_id)
        except boto.exception.EC2ResponseError, e:
            # This exception we expect initially right after registering the copy with EC2 API
            if 'InvalidAMIID.NotFound' in e.error_code and wait:
                time.sleep(1)
            else:
                # On any other exception we should fail
                module.fail_json(
                    msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help: " + str(e))
    else:
        module.fail_json(msg="timed out waiting for image to be recognized")


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        source_region=dict(required=True),
        source_image_id=dict(required=True),
        name=dict(),
        description=dict(default=""),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(default=1200),
        tags=dict(type='dict')))

    module = AnsibleModule(argument_spec=argument_spec)

    # the boto availability check belongs after the AnsibleModule object exists
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    try:
        ec2 = ec2_connect(module)
    except boto.exception.NoAuthHandlerFound, e:
        module.fail_json(msg=str(e))

    try:
        region, ec2_url, boto_params = get_aws_connection_info(module)
        vpc = connect_to_aws(boto.vpc, region, **boto_params)
    except boto.exception.NoAuthHandlerFound, e:
        module.fail_json(msg=str(e))

    if not region:
        module.fail_json(msg="region must be specified")

    copy_image(module, ec2)


# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
cloud/amazon/ec2_eni.py (new file, 404 lines)
@@ -0,0 +1,404 @@
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: ec2_eni
short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
description:
    - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID is provided, an attempt is made to update the existing ENI. By passing 'None' as the instance_id, an ENI can be detached from an instance.
version_added: "2.0"
author: Rob White, wimnat [at] gmail.com, @wimnat
options:
  eni_id:
    description:
      - The ID of the ENI
    required: false
    default: null
  instance_id:
    description:
      - Instance ID that you wish to attach ENI to. To detach an ENI from an instance, use 'None'.
    required: false
    default: null
  private_ip_address:
    description:
      - Private IP address.
    required: false
    default: null
  subnet_id:
    description:
      - ID of subnet in which to create the ENI. Only required when state=present.
    required: true
  description:
    description:
      - Optional description of the ENI.
    required: false
    default: null
  security_groups:
    description:
      - List of security groups associated with the interface. Only used when state=present.
    required: false
    default: null
  state:
    description:
      - Create or delete ENI.
    required: false
    default: present
    choices: [ 'present', 'absent' ]
  device_index:
    description:
      - The index of the device for the network interface attachment on the instance.
    required: false
    default: 0
  force_detach:
    description:
      - Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id to None or when deleting an interface with state=absent.
    required: false
    default: no
  delete_on_termination:
    description:
      - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the interface is being modified, not on creation.
    required: false
  source_dest_check:
    description:
      - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. You can only specify this flag when the interface is being modified, not on creation.
    required: false
extends_documentation_fragment: aws
'''

EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.

# Create an ENI. As no security group is defined, ENI will be created in default security group
- ec2_eni:
    private_ip_address: 172.31.0.20
    subnet_id: subnet-xxxxxxxx
    state: present

# Create an ENI and attach it to an instance
- ec2_eni:
    instance_id: i-xxxxxxx
    device_index: 1
    private_ip_address: 172.31.0.20
    subnet_id: subnet-xxxxxxxx
    state: present

# Destroy an ENI, detaching it from any instance if necessary
- ec2_eni:
    eni_id: eni-xxxxxxx
    force_detach: yes
    state: absent

# Update an ENI
- ec2_eni:
    eni_id: eni-xxxxxxx
    description: "My new description"
    state: present

# Detach an ENI from an instance
- ec2_eni:
    eni_id: eni-xxxxxxx
    instance_id: None
    state: present

### Delete an interface on termination
# First create the interface
- ec2_eni:
    instance_id: i-xxxxxxx
    device_index: 1
    private_ip_address: 172.31.0.20
    subnet_id: subnet-xxxxxxxx
    state: present
  register: eni

# Modify the interface to enable the delete_on_termination flag
- ec2_eni:
    eni_id: {{ "eni.interface.id" }}
    delete_on_termination: true

'''

import time
import xml.etree.ElementTree as ET
import re

try:
    import boto.ec2
    from boto.exception import BotoServerError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False


def get_error_message(xml_string):

    root = ET.fromstring(xml_string)
    for message in root.findall('.//Message'):
        return message.text


def get_eni_info(interface):

    interface_info = {'id': interface.id,
                      'subnet_id': interface.subnet_id,
                      'vpc_id': interface.vpc_id,
                      'description': interface.description,
                      'owner_id': interface.owner_id,
                      'status': interface.status,
                      'mac_address': interface.mac_address,
                      'private_ip_address': interface.private_ip_address,
                      'source_dest_check': interface.source_dest_check,
                      'groups': dict((group.id, group.name) for group in interface.groups),
                      }

    if interface.attachment is not None:
        interface_info['attachment'] = {'attachment_id': interface.attachment.id,
                                        'instance_id': interface.attachment.instance_id,
                                        'device_index': interface.attachment.device_index,
                                        'status': interface.attachment.status,
                                        'attach_time': interface.attachment.attach_time,
                                        'delete_on_termination': interface.attachment.delete_on_termination,
                                        }

    return interface_info


def wait_for_eni(eni, status):

    while True:
        time.sleep(3)
        eni.update()
        # If the status is detached we just need attachment to disappear
        if eni.attachment is None:
            if status == "detached":
                break
        else:
            if status == "attached" and eni.attachment.status == "attached":
                break


def create_eni(connection, module):

    instance_id = module.params.get("instance_id")
    if instance_id == 'None':
        instance_id = None
        do_detach = True
    else:
        do_detach = False
    device_index = module.params.get("device_index")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')
    changed = False

    try:
        eni = compare_eni(connection, module)
        if eni is None:
            eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
            if instance_id is not None:
                try:
                    eni.attach(instance_id, device_index)
                except BotoServerError as ex:
                    eni.delete()
                    raise
            changed = True
            # Wait to allow creation / attachment to finish
            wait_for_eni(eni, "attached")
            eni.update()

    except BotoServerError as e:
        module.fail_json(msg=get_error_message(e.args[2]))

    module.exit_json(changed=changed, interface=get_eni_info(eni))


def modify_eni(connection, module):

    eni_id = module.params.get("eni_id")
    instance_id = module.params.get("instance_id")
    if instance_id == 'None':
        instance_id = None
        do_detach = True
    else:
        do_detach = False
    device_index = module.params.get("device_index")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')
    force_detach = module.params.get("force_detach")
    source_dest_check = module.params.get("source_dest_check")
    delete_on_termination = module.params.get("delete_on_termination")
    changed = False

    try:
        # Get the eni with the eni_id specified
        eni_result_set = connection.get_all_network_interfaces(eni_id)
        eni = eni_result_set[0]
        if description is not None:
            if eni.description != description:
                connection.modify_network_interface_attribute(eni.id, "description", description)
                changed = True
        if security_groups is not None:
            if sorted(get_sec_group_list(eni.groups)) != sorted(security_groups):
                connection.modify_network_interface_attribute(eni.id, "groupSet", security_groups)
                changed = True
        if source_dest_check is not None:
            if eni.source_dest_check != source_dest_check:
                connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
                changed = True
        if delete_on_termination is not None:
            if eni.attachment is not None:
                if eni.attachment.delete_on_termination is not delete_on_termination:
                    connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id)
                    changed = True
            else:
                module.fail_json(msg="Can not modify delete_on_termination as the interface is not attached")
        if eni.attachment is not None and instance_id is None and do_detach is True:
            eni.detach(force_detach)
            wait_for_eni(eni, "detached")
            changed = True
        else:
            if instance_id is not None:
                eni.attach(instance_id, device_index)
                wait_for_eni(eni, "attached")
                changed = True

    except BotoServerError as e:
        print e
        module.fail_json(msg=get_error_message(e.args[2]))

    eni.update()
    module.exit_json(changed=changed, interface=get_eni_info(eni))


def delete_eni(connection, module):

    eni_id = module.params.get("eni_id")
    force_detach = module.params.get("force_detach")

    try:
        eni_result_set = connection.get_all_network_interfaces(eni_id)
        eni = eni_result_set[0]

        if force_detach is True:
            if eni.attachment is not None:
                eni.detach(force_detach)
                # Wait to allow detachment to finish
                wait_for_eni(eni, "detached")
                eni.update()
            eni.delete()
            changed = True
        else:
            eni.delete()
            changed = True

        module.exit_json(changed=changed)
    except BotoServerError as e:
        msg = get_error_message(e.args[2])
        regex = re.compile('The networkInterface ID \'.*\' does not exist')
        if regex.search(msg) is not None:
            module.exit_json(changed=False)
        else:
            module.fail_json(msg=get_error_message(e.args[2]))


def compare_eni(connection, module):

    eni_id = module.params.get("eni_id")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')

    try:
        all_eni = connection.get_all_network_interfaces(eni_id)

        for eni in all_eni:
            remote_security_groups = get_sec_group_list(eni.groups)
            if (eni.subnet_id == subnet_id) and (eni.private_ip_address == private_ip_address) and (eni.description == description) and (remote_security_groups == security_groups):
                return eni

    except BotoServerError as e:
        module.fail_json(msg=get_error_message(e.args[2]))

    return None


def get_sec_group_list(groups):

    # Build list of remote security groups
    remote_security_groups = []
    for group in groups:
        remote_security_groups.append(group.id.encode())

    return remote_security_groups


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            eni_id = dict(default=None),
            instance_id = dict(default=None),
            private_ip_address = dict(),
            subnet_id = dict(),
            description = dict(),
            security_groups = dict(type='list'),
            device_index = dict(default=0, type='int'),
            state = dict(default='present', choices=['present', 'absent']),
            force_detach = dict(default='no', type='bool'),
            source_dest_check = dict(default=None, type='bool'),
            delete_on_termination = dict(default=None, type='bool')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    state = module.params.get("state")
    eni_id = module.params.get("eni_id")

    if state == 'present':
        if eni_id is None:
            if module.params.get("subnet_id") is None:
                module.fail_json(msg="subnet_id must be specified when state=present")
            create_eni(connection, module)
        else:
            modify_eni(connection, module)
    elif state == 'absent':
        if eni_id is None:
            module.fail_json(msg="eni_id must be specified")
        else:
            delete_eni(connection, module)

from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>

main()
cloud/amazon/ec2_eni_facts.py (new file, 135 lines)
@@ -0,0 +1,135 @@
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: ec2_eni_facts
short_description: Gather facts about ec2 ENI interfaces in AWS
description:
    - Gather facts about ec2 ENI interfaces in AWS
version_added: "2.0"
author: "Rob White (@wimnat)"
options:
  eni_id:
    description:
      - The ID of the ENI. Pass this option to gather facts about a particular ENI, otherwise, all ENIs are returned.
    required: false
    default: null
extends_documentation_fragment: aws
'''

EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.

# Gather facts about all ENIs
- ec2_eni_facts:

# Gather facts about a particular ENI
- ec2_eni_facts:
    eni_id: eni-xxxxxxx

'''

import xml.etree.ElementTree as ET

try:
    import boto.ec2
    from boto.exception import BotoServerError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False


def get_error_message(xml_string):

    root = ET.fromstring(xml_string)
    for message in root.findall('.//Message'):
        return message.text


def get_eni_info(interface):

    interface_info = {'id': interface.id,
                      'subnet_id': interface.subnet_id,
                      'vpc_id': interface.vpc_id,
                      'description': interface.description,
                      'owner_id': interface.owner_id,
                      'status': interface.status,
                      'mac_address': interface.mac_address,
                      'private_ip_address': interface.private_ip_address,
                      'source_dest_check': interface.source_dest_check,
                      'groups': dict((group.id, group.name) for group in interface.groups),
                      }

    if interface.attachment is not None:
        interface_info['attachment'] = {'attachment_id': interface.attachment.id,
                                        'instance_id': interface.attachment.instance_id,
                                        'device_index': interface.attachment.device_index,
                                        'status': interface.attachment.status,
                                        'attach_time': interface.attachment.attach_time,
                                        'delete_on_termination': interface.attachment.delete_on_termination,
                                        }

    return interface_info


def list_eni(connection, module):

    eni_id = module.params.get("eni_id")
    interface_dict_array = []

    try:
        all_eni = connection.get_all_network_interfaces(eni_id)
    except BotoServerError as e:
        module.fail_json(msg=get_error_message(e.args[2]))

    for interface in all_eni:
        interface_dict_array.append(get_eni_info(interface))

    module.exit_json(interfaces=interface_dict_array)


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            eni_id = dict(default=None)
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    list_eni(connection, module)

from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>

main()
cloud/amazon/ec2_vpc_igw.py (new file, 159 lines)
@@ -0,0 +1,159 @@
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: ec2_vpc_igw
short_description: Manage an AWS VPC Internet gateway
description:
    - Manage an AWS VPC Internet gateway
version_added: "2.0"
author: Robert Estelle, @erydo
options:
  vpc_id:
    description:
      - The VPC ID for the VPC in which to manage the Internet Gateway.
    required: true
    default: null
  state:
    description:
      - Create or terminate the IGW
    required: false
    default: present
extends_documentation_fragment: aws
'''

EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.

# Ensure that the VPC has an Internet Gateway.
# The Internet Gateway ID can be accessed via {{igw.gateway_id}} for use
# in setting up NATs etc.
local_action:
  module: ec2_vpc_igw
  vpc_id: {{vpc.vpc_id}}
  region: {{vpc.vpc.region}}
  state: present
register: igw
'''


import sys  # noqa

try:
    import boto.ec2
    import boto.vpc
    from boto.exception import EC2ResponseError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
    if __name__ != '__main__':
        raise


class AnsibleIGWException(Exception):
    pass


def ensure_igw_absent(vpc_conn, vpc_id, check_mode):
    igws = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})

    if not igws:
        return {'changed': False}

    if check_mode:
        return {'changed': True}

    for igw in igws:
        try:
            vpc_conn.detach_internet_gateway(igw.id, vpc_id)
            vpc_conn.delete_internet_gateway(igw.id)
        except EC2ResponseError as e:
            raise AnsibleIGWException(
                'Unable to delete Internet Gateway, error: {0}'.format(e))

    return {'changed': True}


def ensure_igw_present(vpc_conn, vpc_id, check_mode):
    igws = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})

    if len(igws) > 1:
        raise AnsibleIGWException(
            'EC2 returned more than one Internet Gateway for VPC {0}, aborting'
            .format(vpc_id))

    if igws:
        return {'changed': False, 'gateway_id': igws[0].id}
    else:
        if check_mode:
            return {'changed': True, 'gateway_id': None}

        try:
            igw = vpc_conn.create_internet_gateway()
            vpc_conn.attach_internet_gateway(igw.id, vpc_id)
            return {'changed': True, 'gateway_id': igw.id}
        except EC2ResponseError as e:
            raise AnsibleIGWException(
                'Unable to create Internet Gateway, error: {0}'.format(e))


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            vpc_id = dict(required=True),
            state = dict(choices=['present', 'absent'], default='present')
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    vpc_id = module.params.get('vpc_id')
    state = module.params.get('state', 'present')

    try:
        if state == 'present':
            result = ensure_igw_present(connection, vpc_id, check_mode=module.check_mode)
        elif state == 'absent':
            result = ensure_igw_absent(connection, vpc_id, check_mode=module.check_mode)
    except AnsibleIGWException as e:
        module.fail_json(msg=str(e))

    module.exit_json(**result)

from ansible.module_utils.basic import *  # noqa
from ansible.module_utils.ec2 import *  # noqa

if __name__ == '__main__':
    main()
@@ -7,7 +7,7 @@ short_description: gets the default administrator password for ec2 windows instances
 description:
     - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module has a dependency on python-boto.
 version_added: "2.0"
-author: Rick Mendes(@rickmendes)
+author: "Rick Mendes (@rickmendes)"
 options:
   instance_id:
     description:
@@ -25,7 +25,7 @@ short_description: Manages account on Apache CloudStack based clouds.
 description:
     - Create, disable, lock, enable and remove accounts.
 version_added: '2.0'
-author: '"René Moser (@resmo)" <mail@renemoser.net>'
+author: "René Moser (@resmo)"
 options:
   name:
     description:
@@ -25,7 +25,7 @@ short_description: Manages affinity groups on Apache CloudStack based clouds.
 description:
     - Create and remove affinity groups.
 version_added: '2.0'
-author: '"René Moser (@resmo)" <mail@renemoser.net>'
+author: "René Moser (@resmo)"
 options:
   name:
     description:
221
cloud/cloudstack/cs_facts.py
Normal file
221
cloud/cloudstack/cs_facts.py
Normal file
|
@ -0,0 +1,221 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# (c) 2015, René Moser <mail@renemoser.net>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: cs_facts
|
||||
short_description: Gather facts on instances of Apache CloudStack based clouds.
|
||||
description:
|
||||
- This module fetches data from the metadata API in CloudStack. The module must be called from within the instance itself.
|
||||
version_added: '2.0'
|
||||
author: "René Moser (@resmo)"
|
||||
options:
|
||||
filter:
|
||||
description:
|
||||
- Filter for a specific fact.
|
||||
required: false
|
||||
default: null
|
||||
choices:
|
||||
- cloudstack_service_offering
|
||||
- cloudstack_availability_zone
|
||||
- cloudstack_public_hostname
|
||||
- cloudstack_public_ipv4
|
||||
- cloudstack_local_hostname
|
||||
- cloudstack_local_ipv4
|
||||
- cloudstack_instance_id
|
||||
- cloudstack_user_data
|
||||
requirements: [ 'yaml' ]
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Gather all facts on instances
|
||||
- name: Gather cloudstack facts
|
||||
cs_facts:
|
||||
|
||||
# Gather specific fact on instances
|
||||
- name: Gather cloudstack facts
|
||||
cs_facts: filter=cloudstack_instance_id
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
cloudstack_availability_zone:
|
||||
description: zone the instance is deployed in.
|
||||
returned: success
|
||||
type: string
|
||||
sample: ch-gva-2
|
||||
cloudstack_instance_id:
|
||||
description: UUID of the instance.
|
||||
returned: success
|
||||
type: string
|
||||
sample: ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
|
||||
cloudstack_local_hostname:
|
||||
description: local hostname of the instance.
|
||||
returned: success
|
||||
type: string
|
||||
sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
|
||||
cloudstack_local_ipv4:
|
||||
description: local IPv4 of the instance.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 185.19.28.35
|
||||
cloudstack_public_hostname:
|
||||
description: public hostname of the instance.
|
||||
returned: success
|
||||
type: string
|
||||
sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
|
||||
cloudstack_public_ipv4:
|
||||
description: public IPv4 of the instance.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 185.19.28.35
|
||||
cloudstack_service_offering:
|
||||
description: service offering of the instance.
|
||||
returned: success
|
||||
type: string
|
||||
sample: Micro 512mb 1cpu
|
||||
cloudstack_user_data:
|
||||
description: data of the instance provided by users.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: { "bla": "foo" }
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
try:
|
||||
import yaml
|
||||
has_lib_yaml = True
|
||||
except ImportError:
|
||||
has_lib_yaml = False
|
||||
|
||||
CS_METADATA_BASE_URL = "http://%s/latest/meta-data"
|
||||
CS_USERDATA_BASE_URL = "http://%s/latest/user-data"
|
||||
|
||||
class CloudStackFacts(object):
|
||||
|
||||
def __init__(self):
|
||||
self.facts = ansible_facts(module)
|
||||
self.api_ip = None
|
||||
self.fact_paths = {
|
||||
'cloudstack_service_offering': 'service-offering',
|
||||
'cloudstack_availability_zone': 'availability-zone',
|
||||
'cloudstack_public_hostname': 'public-hostname',
|
||||
'cloudstack_public_ipv4': 'public-ipv4',
|
||||
'cloudstack_local_hostname': 'local-hostname',
|
||||
'cloudstack_local_ipv4': 'local-ipv4',
|
||||
'cloudstack_instance_id': 'instance-id'
|
||||
}
|
||||
|
||||
def run(self):
|
||||
result = {}
|
||||
filter = module.params.get('filter')
|
||||
if not filter:
|
||||
for key,path in self.fact_paths.iteritems():
|
||||
result[key] = self._fetch(CS_METADATA_BASE_URL + "/" + path)
|
||||
result['cloudstack_user_data'] = self._get_user_data_json()
|
||||
else:
|
||||
if filter == 'cloudstack_user_data':
|
||||
result['cloudstack_user_data'] = self._get_user_data_json()
|
||||
elif filter in self.fact_paths:
|
||||
result[filter] = self._fetch(CS_METADATA_BASE_URL + "/" + self.fact_paths[filter])
|
||||
return result
|
||||
|
||||
|
||||
def _get_user_data_json(self):
|
||||
try:
|
||||
# this data comes from users; we try what we can to parse it...
|
||||
return yaml.load(self._fetch(CS_USERDATA_BASE_URL))
|
||||
except:
|
||||
return None
|
||||
|
||||
|
||||
def _fetch(self, path):
|
||||
api_ip = self._get_api_ip()
|
||||
if not api_ip:
|
||||
return None
|
||||
api_url = path % api_ip
|
||||
(response, info) = fetch_url(module, api_url, force=True)
|
||||
if response:
|
||||
data = response.read()
|
||||
else:
|
||||
data = None
|
||||
return data
|
||||
|
||||
|
||||
def _get_dhcp_lease_file(self):
|
||||
"""Return the path of the lease file."""
|
||||
default_iface = self.facts['default_ipv4']['interface']
|
||||
dhcp_lease_file_locations = [
|
||||
'/var/lib/dhcp/dhclient.%s.leases' % default_iface, # debian / ubuntu
|
||||
'/var/lib/dhclient/dhclient-%s.leases' % default_iface, # centos 6
|
||||
'/var/lib/dhclient/dhclient--%s.lease' % default_iface, # centos 7
|
||||
'/var/db/dhclient.leases.%s' % default_iface, # openbsd
|
||||
]
|
||||
for file_path in dhcp_lease_file_locations:
|
||||
if os.path.exists(file_path):
|
||||
return file_path
|
||||
module.fail_json(msg="Could not find dhclient leases file.")
|
||||
|
||||
|
||||
def _get_api_ip(self):
|
||||
"""Return the IP of the DHCP server."""
|
||||
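# The metadata/user-data API is assumed to be served by the DHCP server
# (in CloudStack this is the virtual router), which is why its IP is read
# from the dhclient leases file.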
if not self.api_ip:
|
||||
dhcp_lease_file = self._get_dhcp_lease_file()
|
||||
for line in open(dhcp_lease_file):
|
||||
if 'dhcp-server-identifier' in line:
|
||||
# extract the IP from a line like "option dhcp-server-identifier 185.19.28.176;"
|
||||
line = line.translate(None, ';')
|
||||
self.api_ip = line.split()[2]
|
||||
break
|
||||
if not self.api_ip:
|
||||
module.fail_json(msg="No dhcp-server-identifier found in leases file.")
|
||||
return self.api_ip
|
||||
|
||||
|
||||
def main():
|
||||
global module
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
filter = dict(default=None, choices=[
|
||||
'cloudstack_service_offering',
|
||||
'cloudstack_availability_zone',
|
||||
'cloudstack_public_hostname',
|
||||
'cloudstack_public_ipv4',
|
||||
'cloudstack_local_hostname',
|
||||
'cloudstack_local_ipv4',
|
||||
'cloudstack_instance_id',
|
||||
'cloudstack_user_data',
|
||||
]),
|
||||
),
|
||||
supports_check_mode=False
|
||||
)
|
||||
|
||||
if not has_lib_yaml:
|
||||
module.fail_json(msg="missing python library: yaml")
|
||||
|
||||
cs_facts = CloudStackFacts().run()
|
||||
cs_facts_result = dict(changed=False, ansible_facts=cs_facts)
|
||||
module.exit_json(**cs_facts_result)
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
from ansible.module_utils.facts import *
|
||||
main()
|
|
@ -25,7 +25,7 @@ short_description: Manages firewall rules on Apache CloudStack based clouds.
|
|||
description:
|
||||
- Creates and removes firewall rules.
|
||||
version_added: '2.0'
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
author: "René Moser (@resmo)"
|
||||
options:
|
||||
ip_address:
|
||||
description:
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: Manages instances and virtual machines on Apache CloudStack b
|
|||
description:
|
||||
- Deploy, start, restart, stop and destroy instances.
|
||||
version_added: '2.0'
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
author: "René Moser (@resmo)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
@ -70,8 +70,8 @@ options:
|
|||
hypervisor:
|
||||
description:
|
||||
- Name of the hypervisor to be used for creating the new instance.
|
||||
- Relevant when using C(state=present) and option C(ISO) is used.
|
||||
- If not set, first found hypervisor will be used.
|
||||
- Relevant when using C(state=present), but only considered if not set on ISO/template.
|
||||
- If not set or found on ISO/template, first found hypervisor will be used.
|
||||
required: false
|
||||
default: null
|
||||
choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
|
||||
|
@ -355,6 +355,8 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
|
|||
def __init__(self, module):
|
||||
AnsibleCloudStack.__init__(self, module)
|
||||
self.instance = None
|
||||
self.template = None
|
||||
self.iso = None
|
||||
|
||||
|
||||
def get_service_offering_id(self):
|
||||
|
@ -371,7 +373,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
|
|||
self.module.fail_json(msg="Service offering '%s' not found" % service_offering)
|
||||
|
||||
|
||||
def get_template_or_iso_id(self):
|
||||
def get_template_or_iso(self, key=None):
|
||||
template = self.module.params.get('template')
|
||||
iso = self.module.params.get('iso')
|
||||
|
||||
|
@ -388,21 +390,28 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
|
|||
args['zoneid'] = self.get_zone('id')
|
||||
|
||||
if template:
|
||||
if self.template:
|
||||
return self._get_by_key(key, self.template)
|
||||
|
||||
args['templatefilter'] = 'executable'
|
||||
templates = self.cs.listTemplates(**args)
|
||||
if templates:
|
||||
for t in templates['template']:
|
||||
if template in [ t['displaytext'], t['name'], t['id'] ]:
|
||||
return t['id']
|
||||
self.template = t
|
||||
return self._get_by_key(key, self.template)
|
||||
self.module.fail_json(msg="Template '%s' not found" % template)
|
||||
|
||||
elif iso:
|
||||
if self.iso:
|
||||
return self._get_by_key(key, self.iso)
|
||||
args['isofilter'] = 'executable'
|
||||
isos = self.cs.listIsos(**args)
|
||||
if isos:
|
||||
for i in isos['iso']:
|
||||
if iso in [ i['displaytext'], i['name'], i['id'] ]:
|
||||
return i['id']
|
||||
self.iso = i
|
||||
return self._get_by_key(key, self.iso)
|
||||
self.module.fail_json(msg="ISO '%s' not found" % iso)
|
||||
|
||||
|
||||
|
@ -503,7 +512,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
|
|||
self.result['changed'] = True
|
||||
|
||||
args = {}
|
||||
args['templateid'] = self.get_template_or_iso_id()
|
||||
args['templateid'] = self.get_template_or_iso(key='id')
|
||||
args['zoneid'] = self.get_zone('id')
|
||||
args['serviceofferingid'] = self.get_service_offering_id()
|
||||
args['account'] = self.get_account('name')
|
||||
|
@ -511,7 +520,6 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
|
|||
args['projectid'] = self.get_project('id')
|
||||
args['diskofferingid'] = self.get_disk_offering_id()
|
||||
args['networkids'] = self.get_network_ids()
|
||||
args['hypervisor'] = self.get_hypervisor()
|
||||
args['userdata'] = self.get_user_data()
|
||||
args['keyboard'] = self.module.params.get('keyboard')
|
||||
args['ipaddress'] = self.module.params.get('ip_address')
|
||||
|
@ -523,6 +531,10 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
|
|||
args['securitygroupnames'] = ','.join(self.module.params.get('security_groups'))
|
||||
args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups'))
|
||||
|
||||
template_iso = self.get_template_or_iso()
|
||||
if 'hypervisor' not in template_iso:
|
||||
args['hypervisor'] = self.get_hypervisor()
|
||||
|
||||
instance = None
|
||||
if not self.module.check_mode:
|
||||
instance = self.cs.deployVirtualMachine(**args)
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: Manages instance groups on Apache CloudStack based clouds.
|
|||
description:
|
||||
- Create and remove instance groups.
|
||||
version_added: '2.0'
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
author: "René Moser (@resmo)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: Manages ISOs images on Apache CloudStack based clouds.
|
|||
description:
|
||||
- Register and remove ISO images.
|
||||
version_added: '2.0'
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
author: "René Moser (@resmo)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
|
637
cloud/cloudstack/cs_network.py
Normal file
|
@ -0,0 +1,637 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# (c) 2015, René Moser <mail@renemoser.net>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: cs_network
|
||||
short_description: Manages networks on Apache CloudStack based clouds.
|
||||
description:
|
||||
- Create, update, restart and delete networks.
|
||||
version_added: '2.0'
|
||||
author: "René Moser (@resmo)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name (case sensitive) of the network.
|
||||
required: true
|
||||
displaytext:
|
||||
description:
|
||||
- Displaytext of the network.
|
||||
- If not specified, C(name) will be used as displaytext.
|
||||
required: false
|
||||
default: null
|
||||
network_offering:
|
||||
description:
|
||||
- Name of the offering for the network.
|
||||
- Required if C(state=present).
|
||||
required: false
|
||||
default: null
|
||||
start_ip:
|
||||
description:
|
||||
- The beginning IPv4 address in the network's IP range.
|
||||
- Only considered on create.
|
||||
required: false
|
||||
default: null
|
||||
end_ip:
|
||||
description:
|
||||
- The ending IPv4 address in the network's IP range.
|
||||
- If not specified, value of C(start_ip) is used.
|
||||
- Only considered on create.
|
||||
required: false
|
||||
default: null
|
||||
gateway:
|
||||
description:
|
||||
- The gateway of the network.
|
||||
- Required for shared networks and for isolated networks that belong to a VPC.
|
||||
- Only considered on create.
|
||||
required: false
|
||||
default: null
|
||||
netmask:
|
||||
description:
|
||||
- The netmask of the network.
|
||||
- Required for shared networks and for isolated networks that belong to a VPC.
|
||||
- Only considered on create.
|
||||
required: false
|
||||
default: null
|
||||
start_ipv6:
|
||||
description:
|
||||
- The beginning IPv6 address in the network's IP range.
|
||||
- Only considered on create.
|
||||
required: false
|
||||
default: null
|
||||
end_ipv6:
|
||||
description:
|
||||
- The ending IPv6 address in the network's IP range.
|
||||
- If not specified, value of C(start_ipv6) is used.
|
||||
- Only considered on create.
|
||||
required: false
|
||||
default: null
|
||||
cidr_ipv6:
|
||||
description:
|
||||
- CIDR of IPv6 network, must be at least /64.
|
||||
- Only considered on create.
|
||||
required: false
|
||||
default: null
|
||||
gateway_ipv6:
|
||||
description:
|
||||
- The gateway of the IPv6 network.
|
||||
- Required for shared networks.
|
||||
- Only considered on create.
|
||||
required: false
|
||||
default: null
|
||||
vlan:
|
||||
description:
|
||||
- The ID or VID of the network.
|
||||
required: false
|
||||
default: null
|
||||
vpc:
|
||||
description:
|
||||
- Name, display text or ID of the VPC the network belongs to.
|
||||
required: false
|
||||
default: null
|
||||
isolated_pvlan:
|
||||
description:
|
||||
- The isolated private vlan for this network.
|
||||
required: false
|
||||
default: null
|
||||
clean_up:
|
||||
description:
|
||||
- Cleanup old network elements.
|
||||
- Only considered on C(state=restarted).
|
||||
required: false
|
||||
default: false
|
||||
acl_type:
|
||||
description:
|
||||
- Access control type.
|
||||
- Only considered on create.
|
||||
required: false
|
||||
default: account
|
||||
choices: [ 'account', 'domain' ]
|
||||
network_domain:
|
||||
description:
|
||||
- The network domain.
|
||||
required: false
|
||||
default: null
|
||||
state:
|
||||
description:
|
||||
- State of the network.
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent', 'restarted' ]
|
||||
zone:
|
||||
description:
|
||||
- Name of the zone in which the network should be deployed.
|
||||
- If not set, default zone is used.
|
||||
required: false
|
||||
default: null
|
||||
project:
|
||||
description:
|
||||
- Name of the project the network is to be deployed in.
|
||||
required: false
|
||||
default: null
|
||||
domain:
|
||||
description:
|
||||
- Domain the network is related to.
|
||||
required: false
|
||||
default: null
|
||||
account:
|
||||
description:
|
||||
- Account the network is related to.
|
||||
required: false
|
||||
default: null
|
||||
poll_async:
|
||||
description:
|
||||
- Poll async jobs until job has finished.
|
||||
required: false
|
||||
default: true
|
||||
extends_documentation_fragment: cloudstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# create a network
|
||||
- local_action:
|
||||
module: cs_network
|
||||
name: my network
|
||||
zone: gva-01
|
||||
network_offering: DefaultIsolatedNetworkOfferingWithSourceNatService
|
||||
network_domain: example.com
|
||||
|
||||
# update a network
|
||||
- local_action:
|
||||
module: cs_network
|
||||
name: my network
|
||||
displaytext: network of domain example.local
|
||||
network_domain: example.local
|
||||
|
||||
# restart a network with clean up
|
||||
- local_action:
|
||||
module: cs_network
|
||||
name: my network
|
||||
clean_up: yes
|
||||
state: restarted
|
||||
|
||||
# remove a network
|
||||
- local_action:
|
||||
module: cs_network
|
||||
name: my network
|
||||
state: absent
|
||||
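# Additional sketch (not part of the original examples): create a network with
# an explicit IPv4 range; offering name, VLAN and addresses are placeholders.
# Note that start_ip, netmask and gateway are required together (see below).
- local_action:
    module: cs_network
    name: my shared network
    zone: gva-01
    network_offering: DefaultSharedNetworkOffering
    start_ip: 10.101.64.10
    end_ip: 10.101.64.100
    netmask: 255.255.255.0
    gateway: 10.101.64.1
    vlan: 101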
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
id:
|
||||
description: ID of the network.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
|
||||
name:
|
||||
description: Name of the network.
|
||||
returned: success
|
||||
type: string
|
||||
sample: web project
|
||||
displaytext:
|
||||
description: Display text of the network.
|
||||
returned: success
|
||||
type: string
|
||||
sample: web project
|
||||
dns1:
|
||||
description: IP address of the 1st nameserver.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 1.2.3.4
|
||||
dns2:
|
||||
description: IP address of the 2nd nameserver.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 1.2.3.4
|
||||
cidr:
|
||||
description: IPv4 network CIDR.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 10.101.64.0/24
|
||||
gateway:
|
||||
description: IPv4 gateway.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 10.101.64.1
|
||||
netmask:
|
||||
description: IPv4 netmask.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 255.255.255.0
|
||||
cidr_ipv6:
|
||||
description: IPv6 network CIDR.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 2001:db8::/64
|
||||
gateway_ipv6:
|
||||
description: IPv6 gateway.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 2001:db8::1
|
||||
zone:
|
||||
description: Name of zone.
|
||||
returned: success
|
||||
type: string
|
||||
sample: ch-gva-2
|
||||
domain:
|
||||
description: Domain the network is related to.
|
||||
returned: success
|
||||
type: string
|
||||
sample: ROOT
|
||||
account:
|
||||
description: Account the network is related to.
|
||||
returned: success
|
||||
type: string
|
||||
sample: example account
|
||||
project:
|
||||
description: Name of project.
|
||||
returned: success
|
||||
type: string
|
||||
sample: Production
|
||||
tags:
|
||||
description: List of resource tags associated with the network.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: '[ { "key": "foo", "value": "bar" } ]'
|
||||
acl_type:
|
||||
description: Access type of the network (Domain, Account).
|
||||
returned: success
|
||||
type: string
|
||||
sample: Account
|
||||
broadcast_domaintype:
|
||||
description: Broadcast domain type of the network.
|
||||
returned: success
|
||||
type: string
|
||||
sample: Vlan
|
||||
type:
|
||||
description: Type of the network.
|
||||
returned: success
|
||||
type: string
|
||||
sample: Isolated
|
||||
traffic_type:
|
||||
description: Traffic type of the network.
|
||||
returned: success
|
||||
type: string
|
||||
sample: Guest
|
||||
state:
|
||||
description: State of the network (Allocated, Implemented, Setup).
|
||||
returned: success
|
||||
type: string
|
||||
sample: Allocated
|
||||
is_persistent:
|
||||
description: Whether the network is persistent or not.
|
||||
returned: success
|
||||
type: boolean
|
||||
sample: false
|
||||
network_domain:
|
||||
description: The network domain.
|
||||
returned: success
|
||||
type: string
|
||||
sample: example.local
|
||||
network_offering:
|
||||
description: The network offering name.
|
||||
returned: success
|
||||
type: string
|
||||
sample: DefaultIsolatedNetworkOfferingWithSourceNatService
|
||||
'''
|
||||
|
||||
try:
|
||||
from cs import CloudStack, CloudStackException, read_config
|
||||
has_lib_cs = True
|
||||
except ImportError:
|
||||
has_lib_cs = False
|
||||
|
||||
# import cloudstack common
|
||||
from ansible.module_utils.cloudstack import *
|
||||
|
||||
|
||||
class AnsibleCloudStackNetwork(AnsibleCloudStack):
|
||||
|
||||
def __init__(self, module):
|
||||
AnsibleCloudStack.__init__(self, module)
|
||||
self.network = None
|
||||
|
||||
|
||||
def get_or_fallback(self, key=None, fallback_key=None):
|
||||
value = self.module.params.get(key)
|
||||
if not value:
|
||||
value = self.module.params.get(fallback_key)
|
||||
return value
|
||||
|
||||
|
||||
def get_vpc(self, key=None):
|
||||
vpc = self.module.params.get('vpc')
|
||||
if not vpc:
|
||||
return None
|
||||
|
||||
args = {}
|
||||
args['account'] = self.get_account(key='name')
|
||||
args['domainid'] = self.get_domain(key='id')
|
||||
args['projectid'] = self.get_project(key='id')
|
||||
args['zoneid'] = self.get_zone(key='id')
|
||||
|
||||
vpcs = self.cs.listVPCs(**args)
|
||||
if vpcs:
|
||||
for v in vpcs['vpc']:
|
||||
if vpc in [ v['name'], v['displaytext'], v['id'] ]:
|
||||
return self._get_by_key(key, v)
|
||||
self.module.fail_json(msg="VPC '%s' not found" % vpc)
|
||||
|
||||
|
||||
def get_network_offering(self, key=None):
|
||||
network_offering = self.module.params.get('network_offering')
|
||||
if not network_offering:
|
||||
self.module.fail_json(msg="missing required arguments: network_offering")
|
||||
|
||||
args = {}
|
||||
args['zoneid'] = self.get_zone(key='id')
|
||||
|
||||
network_offerings = self.cs.listNetworkOfferings(**args)
|
||||
if network_offerings:
|
||||
for no in network_offerings['networkoffering']:
|
||||
if network_offering in [ no['name'], no['displaytext'], no['id'] ]:
|
||||
return self._get_by_key(key, no)
|
||||
self.module.fail_json(msg="Network offering '%s' not found" % network_offering)
|
||||
|
||||
|
||||
def _get_args(self):
|
||||
args = {}
|
||||
args['name'] = self.module.params.get('name')
|
||||
args['displaytext'] = self.get_or_fallback('displaytext','name')
|
||||
args['networkdomain'] = self.module.params.get('network_domain')
|
||||
args['networkofferingid'] = self.get_network_offering(key='id')
|
||||
return args
|
||||
|
||||
|
||||
def get_network(self):
|
||||
if not self.network:
|
||||
network = self.module.params.get('name')
|
||||
|
||||
args = {}
|
||||
args['zoneid'] = self.get_zone(key='id')
|
||||
args['projectid'] = self.get_project(key='id')
|
||||
args['account'] = self.get_account(key='name')
|
||||
args['domainid'] = self.get_domain(key='id')
|
||||
|
||||
networks = self.cs.listNetworks(**args)
|
||||
if networks:
|
||||
for n in networks['network']:
|
||||
if network in [ n['name'], n['displaytext'], n['id']]:
|
||||
self.network = n
|
||||
break
|
||||
return self.network
|
||||
|
||||
|
||||
def present_network(self):
|
||||
network = self.get_network()
|
||||
if not network:
|
||||
network = self.create_network(network)
|
||||
else:
|
||||
network = self.update_network(network)
|
||||
return network
|
||||
|
||||
|
||||
def update_network(self, network):
|
||||
args = self._get_args()
|
||||
args['id'] = network['id']
|
||||
|
||||
if self._has_changed(args, network):
|
||||
self.result['changed'] = True
|
||||
if not self.module.check_mode:
|
||||
network = self.cs.updateNetwork(**args)
|
||||
|
||||
if 'errortext' in network:
|
||||
self.module.fail_json(msg="Failed: '%s'" % network['errortext'])
|
||||
|
||||
poll_async = self.module.params.get('poll_async')
|
||||
if network and poll_async:
|
||||
network = self._poll_job(network, 'network')
|
||||
return network
|
||||
|
||||
|
||||
def create_network(self, network):
|
||||
self.result['changed'] = True
|
||||
|
||||
args = self._get_args()
|
||||
args['acltype'] = self.module.params.get('acl_type')
|
||||
args['zoneid'] = self.get_zone(key='id')
|
||||
args['projectid'] = self.get_project(key='id')
|
||||
args['account'] = self.get_account(key='name')
|
||||
args['domainid'] = self.get_domain(key='id')
|
||||
args['startip'] = self.module.params.get('start_ip')
|
||||
args['endip'] = self.get_or_fallback('end_ip', 'start_ip')
|
||||
args['netmask'] = self.module.params.get('netmask')
|
||||
args['gateway'] = self.module.params.get('gateway')
|
||||
args['startipv6'] = self.module.params.get('start_ipv6')
|
||||
args['endipv6'] = self.get_or_fallback('end_ipv6', 'start_ipv6')
|
||||
args['ip6cidr'] = self.module.params.get('cidr_ipv6')
|
||||
args['ip6gateway'] = self.module.params.get('gateway_ipv6')
|
||||
args['vlan'] = self.module.params.get('vlan')
|
||||
args['isolatedpvlan'] = self.module.params.get('isolated_pvlan')
|
||||
args['subdomainaccess'] = self.module.params.get('subdomain_access')
|
||||
args['vpcid'] = self.get_vpc(key='id')
|
||||
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.createNetwork(**args)
|
||||
|
||||
if 'errortext' in res:
|
||||
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
|
||||
|
||||
network = res['network']
|
||||
return network
|
||||
|
||||
|
||||
def restart_network(self):
|
||||
network = self.get_network()
|
||||
|
||||
if not network:
|
||||
self.module.fail_json(msg="No network named '%s' found." % self.module.params.get('name'))
|
||||
|
||||
# Restarting only available for these states
|
||||
if network['state'].lower() in [ 'implemented', 'setup' ]:
|
||||
self.result['changed'] = True
|
||||
|
||||
args = {}
|
||||
args['id'] = network['id']
|
||||
args['cleanup'] = self.module.params.get('clean_up')
|
||||
|
||||
if not self.module.check_mode:
|
||||
network = self.cs.restartNetwork(**args)
|
||||
|
||||
if 'errortext' in network:
|
||||
self.module.fail_json(msg="Failed: '%s'" % network['errortext'])
|
||||
|
||||
poll_async = self.module.params.get('poll_async')
|
||||
if network and poll_async:
|
||||
network = self._poll_job(network, 'network')
|
||||
return network
|
||||
|
||||
|
||||
def absent_network(self):
|
||||
network = self.get_network()
|
||||
if network:
|
||||
self.result['changed'] = True
|
||||
|
||||
args = {}
|
||||
args['id'] = network['id']
|
||||
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.deleteNetwork(**args)
|
||||
|
||||
if 'errortext' in res:
|
||||
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
|
||||
|
||||
poll_async = self.module.params.get('poll_async')
|
||||
if res and poll_async:
|
||||
res = self._poll_job(res, 'network')
|
||||
return network
|
||||
|
||||
|
||||
def get_result(self, network):
|
||||
if network:
|
||||
if 'id' in network:
|
||||
self.result['id'] = network['id']
|
||||
if 'name' in network:
|
||||
self.result['name'] = network['name']
|
||||
if 'displaytext' in network:
|
||||
self.result['displaytext'] = network['displaytext']
|
||||
if 'dns1' in network:
|
||||
self.result['dns1'] = network['dns1']
|
||||
if 'dns2' in network:
|
||||
self.result['dns2'] = network['dns2']
|
||||
if 'cidr' in network:
|
||||
self.result['cidr'] = network['cidr']
|
||||
if 'broadcastdomaintype' in network:
|
||||
self.result['broadcast_domaintype'] = network['broadcastdomaintype']
|
||||
if 'netmask' in network:
|
||||
self.result['netmask'] = network['netmask']
|
||||
if 'gateway' in network:
|
||||
self.result['gateway'] = network['gateway']
|
||||
if 'ip6cidr' in network:
|
||||
self.result['cidr_ipv6'] = network['ip6cidr']
|
||||
if 'ip6gateway' in network:
|
||||
self.result['gateway_ipv6'] = network['ip6gateway']
|
||||
if 'state' in network:
|
||||
self.result['state'] = network['state']
|
||||
if 'type' in network:
|
||||
self.result['type'] = network['type']
|
||||
if 'traffictype' in network:
|
||||
self.result['traffic_type'] = network['traffictype']
|
||||
if 'zone' in network:
|
||||
self.result['zone'] = network['zonename']
|
||||
if 'domain' in network:
|
||||
self.result['domain'] = network['domain']
|
||||
if 'account' in network:
|
||||
self.result['account'] = network['account']
|
||||
if 'project' in network:
|
||||
self.result['project'] = network['project']
|
||||
if 'acltype' in network:
|
||||
self.result['acl_type'] = network['acltype']
|
||||
if 'networkdomain' in network:
|
||||
self.result['network_domain'] = network['networkdomain']
|
||||
if 'networkofferingname' in network:
|
||||
self.result['network_offering'] = network['networkofferingname']
|
||||
if 'ispersistent' in network:
|
||||
self.result['is_persistent'] = network['ispersistent']
|
||||
if 'tags' in network:
|
||||
self.result['tags'] = []
|
||||
for tag in network['tags']:
|
||||
result_tag = {}
|
||||
result_tag['key'] = tag['key']
|
||||
result_tag['value'] = tag['value']
|
||||
self.result['tags'].append(result_tag)
|
||||
return self.result
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
name = dict(required=True),
|
||||
displaytext = dict(default=None),
|
||||
network_offering = dict(default=None),
|
||||
zone = dict(default=None),
|
||||
start_ip = dict(default=None),
|
||||
end_ip = dict(default=None),
|
||||
gateway = dict(default=None),
|
||||
netmask = dict(default=None),
|
||||
start_ipv6 = dict(default=None),
|
||||
end_ipv6 = dict(default=None),
|
||||
cidr_ipv6 = dict(default=None),
|
||||
gateway_ipv6 = dict(default=None),
|
||||
vlan = dict(default=None),
|
||||
vpc = dict(default=None),
|
||||
isolated_pvlan = dict(default=None),
|
||||
clean_up = dict(type='bool', choices=BOOLEANS, default=False),
|
||||
network_domain = dict(default=None),
|
||||
state = dict(choices=['present', 'absent', 'restarted' ], default='present'),
|
||||
acl_type = dict(choices=['account', 'domain'], default='account'),
|
||||
project = dict(default=None),
|
||||
domain = dict(default=None),
|
||||
account = dict(default=None),
|
||||
poll_async = dict(type='bool', choices=BOOLEANS, default=True),
|
||||
api_key = dict(default=None),
|
||||
api_secret = dict(default=None, no_log=True),
|
||||
api_url = dict(default=None),
|
||||
api_http_method = dict(choices=['get', 'post'], default='get'),
|
||||
api_timeout = dict(type='int', default=10),
|
||||
),
|
||||
required_together = (
|
||||
['api_key', 'api_secret', 'api_url'],
|
||||
['start_ip', 'netmask', 'gateway'],
|
||||
['start_ipv6', 'cidr_ipv6', 'gateway_ipv6'],
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
if not has_lib_cs:
|
||||
module.fail_json(msg="python library cs required: pip install cs")
|
||||
|
||||
try:
|
||||
acs_network = AnsibleCloudStackNetwork(module)
|
||||
|
||||
state = module.params.get('state')
|
||||
if state in ['absent']:
|
||||
network = acs_network.absent_network()
|
||||
|
||||
elif state in ['restarted']:
|
||||
network = acs_network.restart_network()
|
||||
|
||||
else:
|
||||
network = acs_network.present_network()
|
||||
|
||||
result = acs_network.get_result(network)
|
||||
|
||||
except CloudStackException, e:
|
||||
module.fail_json(msg='CloudStackException: %s' % str(e))
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Exception: %s' % str(e))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
|
@ -25,7 +25,7 @@ short_description: Manages port forwarding rules on Apache CloudStack based clou
|
|||
description:
|
||||
- Create, update and remove port forwarding rules.
|
||||
version_added: '2.0'
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
author: "René Moser (@resmo)"
|
||||
options:
|
||||
ip_address:
|
||||
description:
|
||||
|
|
|
@ -25,7 +25,8 @@ short_description: Manages projects on Apache CloudStack based clouds.
|
|||
description:
|
||||
- Create, update, suspend, activate and remove projects.
|
||||
version_added: '2.0'
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
author: "René Moser (@resmo)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the project.
|
||||
|
@ -159,14 +160,13 @@ class AnsibleCloudStackProject(AnsibleCloudStack):
|
|||
project = self.module.params.get('name')
|
||||
|
||||
args = {}
|
||||
args['listall'] = True
|
||||
args['account'] = self.get_account(key='name')
|
||||
args['domainid'] = self.get_domain(key='id')
|
||||
|
||||
projects = self.cs.listProjects(**args)
|
||||
if projects:
|
||||
for p in projects['project']:
|
||||
if project in [ p['name'], p['id']]:
|
||||
if project.lower() in [ p['name'].lower(), p['id']]:
|
||||
self.project = p
|
||||
break
|
||||
return self.project
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: Manages security groups on Apache CloudStack based clouds.
|
|||
description:
|
||||
- Create and remove security groups.
|
||||
version_added: '2.0'
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
author: "René Moser (@resmo)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: Manages security group rules on Apache CloudStack based cloud
|
|||
description:
|
||||
- Add and remove security group rules.
|
||||
version_added: '2.0'
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
author: "René Moser (@resmo)"
|
||||
options:
|
||||
security_group:
|
||||
description:
|
||||
|
|
|
@ -27,7 +27,7 @@ description:
|
|||
- If no key was found and no public key was provided, a new SSH
|
||||
private/public key pair will be created and the private key will be returned.
|
||||
version_added: '2.0'
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
author: "René Moser (@resmo)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
|
633
cloud/cloudstack/cs_template.py
Normal file
|
@ -0,0 +1,633 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# (c) 2015, René Moser <mail@renemoser.net>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: cs_template
|
||||
short_description: Manages templates on Apache CloudStack based clouds.
|
||||
description:
|
||||
- Register a template from a URL, create a template from the ROOT volume of a stopped VM or from one of its snapshots, and delete templates.
|
||||
version_added: '2.0'
|
||||
author: "René Moser (@resmo)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the template.
|
||||
required: true
|
||||
url:
|
||||
description:
|
||||
- URL of where the template is hosted.
|
||||
- Mutually exclusive with C(vm).
|
||||
required: false
|
||||
default: null
|
||||
vm:
|
||||
description:
|
||||
- Name of the VM the template will be created from, using its ROOT volume or, alternatively, a snapshot of it.
|
||||
- VM must be in stopped state if created from its volume.
|
||||
- Mutually exclusive with C(url).
|
||||
required: false
|
||||
default: null
|
||||
snapshot:
|
||||
description:
|
||||
- Name of the snapshot, taken from the VM ROOT volume, that the template will be created from.
|
||||
- C(vm) is required together with this argument.
|
||||
required: false
|
||||
default: null
|
||||
os_type:
|
||||
description:
|
||||
- OS type that best represents the OS of this template.
|
||||
required: false
|
||||
default: null
|
||||
checksum:
|
||||
description:
|
||||
- The MD5 checksum value of this template.
|
||||
- If set, we search by checksum instead of name.
|
||||
required: false
|
||||
default: null
|
||||
is_ready:
|
||||
description:
|
||||
- This flag is used for searching existing templates.
|
||||
- If set to C(true), only templates that are ready for deployment, e.g. successfully downloaded and installed, will be listed.
|
||||
- Recommended to set it to C(false).
|
||||
required: false
|
||||
default: false
|
||||
is_public:
|
||||
description:
|
||||
- Register the template to be publicly available to all users.
|
||||
- Only used if C(state) is present.
|
||||
required: false
|
||||
default: false
|
||||
is_featured:
|
||||
description:
|
||||
- Register the template to be featured.
|
||||
- Only used if C(state) is present.
|
||||
required: false
|
||||
default: false
|
||||
is_dynamically_scalable:
|
||||
description:
|
||||
- Register the template as having XS/VMware tools installed, in order to support dynamic scaling of VM CPU/memory.
|
||||
- Only used if C(state) is present.
|
||||
required: false
|
||||
default: false
|
||||
project:
|
||||
description:
|
||||
- Name of the project the template is to be registered in.
|
||||
required: false
|
||||
default: null
|
||||
zone:
|
||||
description:
|
||||
- Name of the zone you wish the template to be registered in or deleted from.
|
||||
- If not specified, first found zone will be used.
|
||||
required: false
|
||||
default: null
|
||||
template_filter:
|
||||
description:
|
||||
- Name of the filter used to search for the template.
|
||||
required: false
|
||||
default: 'self'
|
||||
choices: [ 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community' ]
|
||||
hypervisor:
|
||||
description:
|
||||
- Name of the hypervisor to be used for creating the new template.
|
||||
- Relevant when using C(state=present).
|
||||
required: false
|
||||
default: none
|
||||
choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
|
||||
requires_hvm:
|
||||
description:
|
||||
- true if this template requires HVM.
|
||||
required: false
|
||||
default: false
|
||||
password_enabled:
|
||||
description:
|
||||
- True if the template supports the password reset feature.
|
||||
required: false
|
||||
default: false
|
||||
template_tag:
|
||||
description:
|
||||
- the tag for this template.
|
||||
required: false
|
||||
default: null
|
||||
sshkey_enabled:
|
||||
description:
|
||||
- True if the template supports the sshkey upload feature.
|
||||
required: false
|
||||
default: false
|
||||
is_routing:
|
||||
description:
|
||||
- True if the template type is routing, i.e. the template is used to deploy a router.
|
||||
- Only considered if C(url) is used.
|
||||
required: false
|
||||
default: false
|
||||
format:
|
||||
description:
|
||||
- The format for the template.
|
||||
- Relevant when using C(state=present).
|
||||
required: false
|
||||
default: null
|
||||
choices: [ 'QCOW2', 'RAW', 'VHD', 'OVA' ]
|
||||
is_extractable:
|
||||
description:
|
||||
- True if the template or its derivatives are extractable.
|
||||
required: false
|
||||
default: false
|
||||
details:
|
||||
description:
|
||||
- Template details in key/value pairs.
|
||||
required: false
|
||||
default: null
|
||||
bits:
|
||||
description:
|
||||
- 32 or 64 bits support.
|
||||
required: false
|
||||
default: '64'
|
||||
displaytext:
|
||||
description:
|
||||
- the display text of the template.
|
||||
required: false
|
||||
default: null
|
||||
state:
|
||||
description:
|
||||
- State of the template.
|
||||
required: false
|
||||
default: 'present'
|
||||
choices: [ 'present', 'absent' ]
|
||||
poll_async:
|
||||
description:
|
||||
- Poll async jobs until job has finished.
|
||||
required: false
|
||||
default: true
|
||||
extends_documentation_fragment: cloudstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Register a systemvm template
|
||||
- local_action:
|
||||
module: cs_template
|
||||
name: systemvm-4.5
|
||||
url: "http://packages.shapeblue.com/systemvmtemplate/4.5/systemvm64template-4.5-vmware.ova"
|
||||
hypervisor: VMware
|
||||
format: OVA
|
||||
zone: tokio-ix
|
||||
os_type: Debian GNU/Linux 7(64-bit)
|
||||
is_routing: yes
|
||||
|
||||
# Create a template from a stopped virtual machine's volume
|
||||
- local_action:
|
||||
module: cs_template
|
||||
name: debian-base-template
|
||||
vm: debian-base-vm
|
||||
os_type: Debian GNU/Linux 7(64-bit)
|
||||
zone: tokio-ix
|
||||
password_enabled: yes
|
||||
is_public: yes
|
||||
|
||||
# Create a template from a virtual machine's root volume snapshot
|
||||
- local_action:
|
||||
module: cs_template
|
||||
name: debian-base-template
|
||||
vm: debian-base-vm
|
||||
snapshot: ROOT-233_2015061509114
|
||||
os_type: Debian GNU/Linux 7(64-bit)
|
||||
zone: tokio-ix
|
||||
password_enabled: yes
|
||||
is_public: yes
|
||||
|
||||
# Remove a template
|
||||
- local_action:
|
||||
module: cs_template
|
||||
name: systemvm-4.2
|
||||
state: absent
|
||||
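# Additional sketch (not part of the original examples): register a template
# and match it by MD5 checksum instead of by name; URL and checksum values
# are placeholders. format, url and hypervisor are required together.
- local_action:
    module: cs_template
    name: debian-7-base
    url: "http://example.com/images/debian-7-base.qcow2"
    format: QCOW2
    hypervisor: KVM
    checksum: 0b31bccccb048d20b551f70830bb7ad0
    os_type: Debian GNU/Linux 7(64-bit)
    zone: tokio-ix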
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
name:
|
||||
description: Name of the template.
|
||||
returned: success
|
||||
type: string
|
||||
sample: Debian 7 64-bit
|
||||
displaytext:
|
||||
description: Displaytext of the template.
|
||||
returned: success
|
||||
type: string
|
||||
sample: Debian 7.7 64-bit minimal 2015-03-19
|
||||
checksum:
|
||||
description: MD5 checksum of the template.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 0b31bccccb048d20b551f70830bb7ad0
|
||||
status:
|
||||
description: Status of the template.
|
||||
returned: success
|
||||
type: string
|
||||
sample: Download Complete
|
||||
is_ready:
|
||||
description: True if the template is ready to be deployed from.
|
||||
returned: success
|
||||
type: boolean
|
||||
sample: true
|
||||
is_public:
|
||||
description: True if the template is public.
|
||||
returned: success
|
||||
type: boolean
|
||||
sample: true
|
||||
is_featured:
|
||||
description: True if the template is featured.
|
||||
returned: success
|
||||
type: boolean
|
||||
sample: true
|
||||
is_extractable:
|
||||
description: True if the template is extractable.
|
||||
returned: success
|
||||
type: boolean
|
||||
sample: true
|
||||
format:
|
||||
description: Format of the template.
|
||||
returned: success
|
||||
type: string
|
||||
sample: OVA
|
||||
os_type:
|
||||
description: Type of the OS.
|
||||
returned: success
|
||||
type: string
|
||||
sample: CentOS 6.5 (64-bit)
|
||||
password_enabled:
|
||||
description: True if the reset password feature is enabled, false otherwise.
|
||||
returned: success
|
||||
type: boolean
|
||||
sample: false
|
||||
sshkey_enabled:
|
||||
description: true if template is sshkey enabled, false otherwise.
|
||||
returned: success
|
||||
type: boolean
|
||||
sample: false
|
||||
cross_zones:
|
||||
description: true if the template is managed across all zones, false otherwise.
|
||||
returned: success
|
||||
type: boolean
|
||||
sample: false
|
||||
template_type:
|
||||
description: Type of the template.
|
||||
returned: success
|
||||
type: string
|
||||
sample: USER
|
||||
created:
|
||||
description: Date of registering.
|
||||
returned: success
|
||||
type: string
|
||||
sample: 2015-03-29T14:57:06+0200
|
||||
template_tag:
|
||||
description: Template tag related to this template.
|
||||
returned: success
|
||||
type: string
|
||||
sample: special
|
||||
hypervisor:
|
||||
description: Hypervisor related to this template.
|
||||
returned: success
|
||||
type: string
|
||||
sample: VMware
|
||||
tags:
|
||||
description: List of resource tags associated with the template.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: '[ { "key": "foo", "value": "bar" } ]'
|
||||
zone:
|
||||
description: Name of zone the template is registered in.
|
||||
returned: success
|
||||
type: string
|
||||
sample: zuerich
|
||||
domain:
|
||||
description: Domain the template is related to.
|
||||
returned: success
|
||||
type: string
|
||||
sample: example domain
|
||||
account:
|
||||
description: Account the template is related to.
|
||||
returned: success
|
||||
type: string
|
||||
sample: example account
|
||||
project:
|
||||
description: Name of project the template is related to.
|
||||
returned: success
|
||||
type: string
|
||||
sample: Production
|
||||
'''
|
||||
|
||||
try:
|
||||
from cs import CloudStack, CloudStackException, read_config
|
||||
has_lib_cs = True
|
||||
except ImportError:
|
||||
has_lib_cs = False
|
||||
|
||||
# import cloudstack common
|
||||
from ansible.module_utils.cloudstack import *
|
||||
|
||||
|
||||
class AnsibleCloudStackTemplate(AnsibleCloudStack):
|
||||
|
||||
def __init__(self, module):
|
||||
AnsibleCloudStack.__init__(self, module)
|
||||
|
||||
|
||||
def _get_args(self):
|
||||
args = {}
|
||||
args['name'] = self.module.params.get('name')
|
||||
args['displaytext'] = self.module.params.get('displaytext')
|
||||
args['bits'] = self.module.params.get('bits')
|
||||
args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable')
|
||||
args['isextractable'] = self.module.params.get('is_extractable')
|
||||
args['isfeatured'] = self.module.params.get('is_featured')
|
||||
args['ispublic'] = self.module.params.get('is_public')
|
||||
args['passwordenabled'] = self.module.params.get('password_enabled')
|
||||
args['requireshvm'] = self.module.params.get('requires_hvm')
|
||||
args['templatetag'] = self.module.params.get('template_tag')
|
||||
args['ostypeid'] = self.get_os_type(key='id')
|
||||
|
||||
if not args['ostypeid']:
|
||||
self.module.fail_json(msg="Missing required arguments: os_type")
|
||||
|
||||
if not args['displaytext']:
|
||||
args['displaytext'] = self.module.params.get('name')
|
||||
return args
|
||||
|
||||
|
||||
def get_root_volume(self, key=None):
|
||||
args = {}
|
||||
args['account'] = self.get_account(key='name')
|
||||
args['domainid'] = self.get_domain(key='id')
|
||||
args['projectid'] = self.get_project(key='id')
|
||||
args['virtualmachineid'] = self.get_vm(key='id')
|
||||
args['type'] = "ROOT"
|
||||
|
||||
volumes = self.cs.listVolumes(**args)
|
||||
if volumes:
|
||||
return self._get_by_key(key, volumes['volume'][0])
|
||||
self.module.fail_json(msg="Root volume for '%s' not found" % self.get_vm('name'))
|
||||
|
||||
|
||||
def get_snapshot(self, key=None):
|
||||
snapshot = self.module.params.get('snapshot')
|
||||
if not snapshot:
|
||||
return None
|
||||
|
||||
args = {}
|
||||
args['account'] = self.get_account(key='name')
|
||||
args['domainid'] = self.get_domain(key='id')
|
||||
args['projectid'] = self.get_project(key='id')
|
||||
args['volumeid'] = self.get_root_volume('id')
|
||||
snapshots = self.cs.listSnapshots(**args)
|
||||
if snapshots:
|
||||
for s in snapshots['snapshot']:
|
||||
if snapshot in [ s['name'], s['id'] ]:
|
||||
return self._get_by_key(key, s)
|
||||
self.module.fail_json(msg="Snapshot '%s' not found" % snapshot)
|
||||
|
||||
|
||||
def create_template(self):
|
||||
template = self.get_template()
|
||||
if not template:
|
||||
self.result['changed'] = True
|
||||
|
||||
args = self._get_args()
|
||||
snapshot_id = self.get_snapshot(key='id')
|
||||
if snapshot_id:
|
||||
args['snapshotid'] = snapshot_id
|
||||
else:
|
||||
args['volumeid'] = self.get_root_volume('id')
|
||||
|
||||
if not self.module.check_mode:
|
||||
template = self.cs.createTemplate(**args)
|
||||
|
||||
if 'errortext' in template:
|
||||
self.module.fail_json(msg="Failed: '%s'" % template['errortext'])
|
||||
|
||||
poll_async = self.module.params.get('poll_async')
|
||||
if poll_async:
|
||||
template = self._poll_job(template, 'template')
|
||||
return template
|
||||
|
||||
|
||||
def register_template(self):
|
||||
template = self.get_template()
|
||||
if not template:
|
||||
self.result['changed'] = True
|
||||
args = self._get_args()
|
||||
args['url'] = self.module.params.get('url')
|
||||
args['format'] = self.module.params.get('format')
|
||||
args['checksum'] = self.module.params.get('checksum')
|
||||
args['isextractable'] = self.module.params.get('is_extractable')
|
||||
args['isrouting'] = self.module.params.get('is_routing')
|
||||
args['sshkeyenabled'] = self.module.params.get('sshkey_enabled')
|
||||
args['hypervisor'] = self.get_hypervisor()
|
||||
args['zoneid'] = self.get_zone(key='id')
|
||||
args['domainid'] = self.get_domain(key='id')
|
||||
args['account'] = self.get_account(key='name')
|
||||
args['projectid'] = self.get_project(key='id')
|
||||
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.registerTemplate(**args)
|
||||
if 'errortext' in res:
|
||||
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
|
||||
template = res['template']
|
||||
return template
|
||||
|
||||
|
||||
def get_template(self):
|
||||
args = {}
|
||||
args['isready'] = self.module.params.get('is_ready')
|
||||
args['templatefilter'] = self.module.params.get('template_filter')
|
||||
args['zoneid'] = self.get_zone(key='id')
|
||||
args['domainid'] = self.get_domain(key='id')
|
||||
args['account'] = self.get_account(key='name')
|
||||
args['projectid'] = self.get_project(key='id')
|
||||
|
||||
# if checksum is set, we only look on that.
|
||||
checksum = self.module.params.get('checksum')
|
||||
if not checksum:
|
||||
args['name'] = self.module.params.get('name')
|
||||
|
||||
templates = self.cs.listTemplates(**args)
|
||||
if templates:
|
||||
# if checksum is set, we only look on that.
|
||||
if not checksum:
|
||||
return templates['template'][0]
|
||||
else:
|
||||
for i in templates['template']:
|
||||
if i['checksum'] == checksum:
|
||||
return i
|
||||
return None
|
||||
|
||||
|
||||
def remove_template(self):
|
||||
template = self.get_template()
|
||||
if template:
|
||||
self.result['changed'] = True
|
||||
|
||||
args = {}
|
||||
args['id'] = template['id']
|
||||
args['zoneid'] = self.get_zone(key='id')
|
||||
|
||||
if not self.module.check_mode:
|
||||
res = self.cs.deleteTemplate(**args)
|
||||
|
||||
if 'errortext' in res:
|
||||
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
|
||||
|
||||
poll_async = self.module.params.get('poll_async')
|
||||
if poll_async:
|
||||
res = self._poll_job(res, 'template')
|
||||
return template
|
||||
|
||||
|
||||
def get_result(self, template):
|
||||
if template:
|
||||
if 'displaytext' in template:
|
||||
self.result['displaytext'] = template['displaytext']
|
||||
if 'name' in template:
|
||||
self.result['name'] = template['name']
|
||||
if 'hypervisor' in template:
|
||||
self.result['hypervisor'] = template['hypervisor']
|
||||
if 'zonename' in template:
|
||||
self.result['zone'] = template['zonename']
|
||||
if 'checksum' in template:
|
||||
self.result['checksum'] = template['checksum']
|
||||
if 'format' in template:
|
||||
self.result['format'] = template['format']
|
||||
if 'isready' in template:
|
||||
self.result['is_ready'] = template['isready']
|
||||
if 'ispublic' in template:
|
||||
self.result['is_public'] = template['ispublic']
|
||||
if 'isfeatured' in template:
|
||||
self.result['is_featured'] = template['isfeatured']
|
||||
if 'isextractable' in template:
|
||||
self.result['is_extractable'] = template['isextractable']
|
||||
# and yes! it is really camelCase!
|
||||
if 'crossZones' in template:
|
||||
self.result['cross_zones'] = template['crossZones']
|
||||
if 'ostypename' in template:
|
||||
self.result['os_type'] = template['ostypename']
|
||||
if 'templatetype' in template:
|
||||
self.result['template_type'] = template['templatetype']
|
||||
if 'passwordenabled' in template:
|
||||
self.result['password_enabled'] = template['passwordenabled']
|
||||
if 'sshkeyenabled' in template:
|
||||
self.result['sshkey_enabled'] = template['sshkeyenabled']
|
||||
if 'status' in template:
|
||||
self.result['status'] = template['status']
|
||||
if 'created' in template:
|
||||
self.result['created'] = template['created']
|
||||
if 'templatetag' in template:
|
||||
self.result['template_tag'] = template['templatetag']
|
||||
if 'tags' in template:
|
||||
self.result['tags'] = []
|
||||
for tag in template['tags']:
|
||||
result_tag = {}
|
||||
result_tag['key'] = tag['key']
|
||||
result_tag['value'] = tag['value']
|
||||
self.result['tags'].append(result_tag)
|
||||
if 'domain' in template:
|
||||
self.result['domain'] = template['domain']
|
||||
if 'account' in template:
|
||||
self.result['account'] = template['account']
|
||||
if 'project' in template:
|
||||
self.result['project'] = template['project']
|
||||
return self.result
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
name = dict(required=True),
|
||||
displaytext = dict(default=None),
|
||||
url = dict(default=None),
|
||||
vm = dict(default=None),
|
||||
snapshot = dict(default=None),
|
||||
os_type = dict(default=None),
|
||||
is_ready = dict(type='bool', choices=BOOLEANS, default=False),
|
||||
is_public = dict(type='bool', choices=BOOLEANS, default=True),
|
||||
is_featured = dict(type='bool', choices=BOOLEANS, default=False),
|
||||
is_dynamically_scalable = dict(type='bool', choices=BOOLEANS, default=False),
|
||||
is_extractable = dict(type='bool', choices=BOOLEANS, default=False),
|
||||
is_routing = dict(type='bool', choices=BOOLEANS, default=False),
|
||||
checksum = dict(default=None),
|
||||
template_filter = dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']),
|
||||
hypervisor = dict(choices=['KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM'], default=None),
|
||||
requires_hvm = dict(type='bool', choices=BOOLEANS, default=False),
|
||||
password_enabled = dict(type='bool', choices=BOOLEANS, default=False),
|
||||
template_tag = dict(default=None),
|
||||
sshkey_enabled = dict(type='bool', choices=BOOLEANS, default=False),
|
||||
format = dict(choices=['QCOW2', 'RAW', 'VHD', 'OVA'], default=None),
|
||||
details = dict(default=None),
|
||||
bits = dict(type='int', choices=[ 32, 64 ], default=64),
|
||||
state = dict(choices=['present', 'absent'], default='present'),
|
||||
zone = dict(default=None),
|
||||
domain = dict(default=None),
|
||||
account = dict(default=None),
|
||||
project = dict(default=None),
|
||||
poll_async = dict(type='bool', choices=BOOLEANS, default=True),
|
||||
api_key = dict(default=None),
|
||||
api_secret = dict(default=None, no_log=True),
|
||||
api_url = dict(default=None),
|
||||
api_http_method = dict(choices=['get', 'post'], default='get'),
|
||||
api_timeout = dict(type='int', default=10),
|
||||
),
|
||||
mutually_exclusive = (
|
||||
['url', 'vm'],
|
||||
),
|
||||
required_together = (
|
||||
['api_key', 'api_secret', 'api_url'],
|
||||
['format', 'url', 'hypervisor'],
|
||||
),
|
||||
required_one_of = (
|
||||
['url', 'vm'],
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
if not has_lib_cs:
|
||||
module.fail_json(msg="python library cs required: pip install cs")
|
||||
|
||||
try:
|
||||
acs_tpl = AnsibleCloudStackTemplate(module)
|
||||
|
||||
state = module.params.get('state')
|
||||
if state in ['absent']:
|
||||
tpl = acs_tpl.remove_template()
|
||||
else:
|
||||
url = module.params.get('url')
|
||||
if url:
|
||||
tpl = acs_tpl.register_template()
|
||||
else:
|
||||
tpl = acs_tpl.create_template()
|
||||
|
||||
result = acs_tpl.get_result(tpl)
|
||||
|
||||
except CloudStackException, e:
|
||||
module.fail_json(msg='CloudStackException: %s' % str(e))
|
||||
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Exception: %s' % str(e))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
|
@ -25,7 +25,7 @@ short_description: Manages VM snapshots on Apache CloudStack based clouds.
|
|||
description:
|
||||
- Create, remove and revert VM from snapshots.
|
||||
version_added: '2.0'
|
||||
author: '"René Moser (@resmo)" <mail@renemoser.net>'
|
||||
author: "René Moser (@resmo)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
|
|
@ -81,7 +81,7 @@ options:
|
|||
requirements:
|
||||
- "python >= 2.6"
|
||||
- "apache-libcloud"
|
||||
author: '"Peter Tan (@tanpeter)" <ptan@google.com>'
|
||||
author: "Peter Tan (@tanpeter)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
|
|
@ -26,7 +26,7 @@ short_description: Manage LXC Containers
|
|||
version_added: 1.8.0
|
||||
description:
|
||||
- Management of LXC containers
|
||||
author: '"Kevin Carter (@cloudnull)" <kevin.carter@rackspace.com>'
|
||||
author: "Kevin Carter (@cloudnull)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
@ -173,9 +173,9 @@ options:
|
|||
- list of 'key=value' options to use when configuring a container.
|
||||
required: false
|
||||
requirements:
|
||||
- 'lxc >= 1.0'
|
||||
- 'python >= 2.6'
|
||||
- 'python2-lxc >= 0.1'
|
||||
- 'lxc >= 1.0 # OS package'
|
||||
- 'python >= 2.6 # OS Package'
|
||||
- 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
|
||||
notes:
|
||||
- Containers must have a unique name. If you attempt to create a container
|
||||
with a name that already exists in the users namespace the module will
|
||||
|
@ -195,7 +195,8 @@ notes:
|
|||
creating the archive.
|
||||
- If your distro does not have a package for "python2-lxc", which is a
|
||||
requirement for this module, it can be installed from source at
|
||||
"https://github.com/lxc/python2-lxc"
|
||||
"https://github.com/lxc/python2-lxc" or installed via pip using the package
|
||||
name lxc-python2.
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
|
@ -384,6 +385,8 @@ try:
|
|||
import lxc
|
||||
except ImportError:
|
||||
HAS_LXC = False
|
||||
else:
|
||||
HAS_LXC = True
|
||||
|
||||
|
||||
# LXC_COMPRESSION_MAP is a map of available compression types when creating
|
||||
|
@ -707,7 +710,7 @@ class LxcContainerManagement(object):
|
|||
for option_line in container_config:
|
||||
# Look for key in config
|
||||
if option_line.startswith(key):
|
||||
_, _value = option_line.split('=')
|
||||
_, _value = option_line.split('=', 1)
|
||||
config_value = ' '.join(_value.split())
|
||||
line_index = container_config.index(option_line)
|
||||
# If the sanitized values don't match replace them
|
||||
|
@ -1064,6 +1067,9 @@ class LxcContainerManagement(object):
|
|||
self.container.stop()
|
||||
self.state_change = True
|
||||
|
||||
# Run container startup
|
||||
self._container_startup()
|
||||
|
||||
# Check if the container needs to have an archive created.
|
||||
self._check_archive()
|
||||
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ovirt
|
||||
author: '"Vincent Van der Kussen (@vincentvdk)" <vincent at vanderkussen.org>'
|
||||
author: "Vincent Van der Kussen (@vincentvdk)"
|
||||
short_description: oVirt/RHEV platform management
|
||||
description:
|
||||
- allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform
|
||||
|
|
|
@ -41,7 +41,7 @@ options:
|
|||
- the instance id
|
||||
default: null
|
||||
required: true
|
||||
https_verify_ssl:
|
||||
validate_certs:
|
||||
description:
|
||||
- enable / disable https certificate verification
|
||||
default: false
|
||||
|
@ -219,6 +219,7 @@ def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, sw
|
|||
% proxmox_node.tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def start_instance(module, proxmox, vm, vmid, timeout):
|
||||
taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.start.post()
|
||||
|
@ -272,7 +273,7 @@ def main():
|
|||
api_user = dict(required=True),
|
||||
api_password = dict(no_log=True),
|
||||
vmid = dict(required=True),
|
||||
https_verify_ssl = dict(type='bool', choices=BOOLEANS, default='no'),
|
||||
validate_certs = dict(type='bool', choices=BOOLEANS, default='no'),
|
||||
node = dict(),
|
||||
password = dict(no_log=True),
|
||||
hostname = dict(),
|
||||
|
@ -302,7 +303,7 @@ def main():
|
|||
api_host = module.params['api_host']
|
||||
api_password = module.params['api_password']
|
||||
vmid = module.params['vmid']
|
||||
https_verify_ssl = module.params['https_verify_ssl']
|
||||
validate_certs = module.params['validate_certs']
|
||||
node = module.params['node']
|
||||
disk = module.params['disk']
|
||||
cpus = module.params['cpus']
|
||||
|
@ -319,7 +320,7 @@ def main():
|
|||
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
|
||||
|
||||
try:
|
||||
proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=https_verify_ssl)
|
||||
proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
|
||||
except Exception, e:
|
||||
module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
|
||||
|
||||
|
|
232
cloud/misc/proxmox_template.py
Normal file
|
@ -0,0 +1,232 @@
|
|||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: proxmox_template
|
||||
short_description: management of OS templates in Proxmox VE cluster
|
||||
description:
|
||||
- allows you to upload/delete templates in a Proxmox VE cluster
|
||||
version_added: "2.0"
|
||||
options:
|
||||
api_host:
|
||||
description:
|
||||
- the host of the Proxmox VE cluster
|
||||
required: true
|
||||
api_user:
|
||||
description:
|
||||
- the user to authenticate with
|
||||
required: true
|
||||
api_password:
|
||||
description:
|
||||
- the password to authenticate with
|
||||
- you can use the PROXMOX_PASSWORD environment variable instead
|
||||
default: null
|
||||
required: false
|
||||
validate_certs:
|
||||
description:
|
||||
- enable / disable https certificate verification
|
||||
default: false
|
||||
required: false
|
||||
type: boolean
|
||||
node:
|
||||
description:
|
||||
- Proxmox VE node on which the template will be managed
|
||||
default: null
|
||||
required: true
|
||||
src:
|
||||
description:
|
||||
- path of the file to upload
|
||||
- required only for C(state=present)
|
||||
default: null
|
||||
required: false
|
||||
aliases: ['path']
|
||||
template:
|
||||
description:
|
||||
- the template name
|
||||
- required only for C(state=absent)
|
||||
default: null
|
||||
required: false
|
||||
content_type:
|
||||
description:
|
||||
- content type
|
||||
- required only for C(state=present)
|
||||
default: 'vztmpl'
|
||||
required: false
|
||||
choices: ['vztmpl', 'iso']
|
||||
storage:
|
||||
description:
|
||||
- target storage
|
||||
default: 'local'
|
||||
required: false
|
||||
type: string
|
||||
timeout:
|
||||
description:
|
||||
- timeout for operations
|
||||
default: 30
|
||||
required: false
|
||||
type: integer
|
||||
force:
|
||||
description:
|
||||
- can be used only with C(state=present); an existing template will be overwritten
|
||||
default: false
|
||||
required: false
|
||||
type: boolean
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the template
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
notes:
|
||||
- Requires the proxmoxer and requests modules on the host. These modules can be installed with pip.
|
||||
requirements: [ "proxmoxer", "requests" ]
|
||||
author: "Sergei Antipov @UnderGreen"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Upload new openvz template with minimal options
|
||||
- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' src='~/ubuntu-14.04-x86_64.tar.gz'
|
||||
|
||||
# Upload new openvz template with minimal options, using the PROXMOX_PASSWORD environment variable (export it beforehand)
|
||||
- proxmox_template: node='uk-mc02' api_user='root@pam' api_host='node1' src='~/ubuntu-14.04-x86_64.tar.gz'
|
||||
|
||||
# Upload new openvz template with all options and force overwrite
|
||||
- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' storage='local' content_type='vztmpl' src='~/ubuntu-14.04-x86_64.tar.gz' force=yes
|
||||
|
||||
# Delete template with minimal options
|
||||
- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' template='ubuntu-14.04-x86_64.tar.gz' state=absent
|
||||
'''
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
try:
|
||||
from proxmoxer import ProxmoxAPI
|
||||
HAS_PROXMOXER = True
|
||||
except ImportError:
|
||||
HAS_PROXMOXER = False
|
||||
|
||||
def get_template(proxmox, node, storage, content_type, template):
|
||||
return [ True for tmpl in proxmox.nodes(node).storage(storage).content.get()
|
||||
if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template) ]
|
||||
|
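get_template() above relies on Proxmox volume IDs following the storage:content_type/template pattern; a small sketch with made-up values of the comparison it performs:

# Hypothetical values illustrating the volid match used by get_template().
storage, content_type, template = 'local', 'vztmpl', 'ubuntu-14.04-x86_64.tar.gz'
expected_volid = '%s:%s/%s' % (storage, content_type, template)
print(expected_volid)  # 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'

# The module treats a non-empty result list as "template exists".
fake_listing = [{'volid': 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'}]
exists = [True for tmpl in fake_listing if tmpl['volid'] == expected_volid]
print(bool(exists))    # True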
||||
def upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout):
|
||||
taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath))
|
||||
while timeout:
|
||||
task_status = proxmox.nodes(api_host.split('.')[0]).tasks(taskid).status.get()
|
||||
if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for uploading template. Last line in task before timeout: %s'
|
||||
% proxmox.nodes(node).tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def delete_template(module, proxmox, node, storage, content_type, template, timeout):
|
||||
volid = '%s:%s/%s' % (storage, content_type, template)
|
||||
proxmox.nodes(node).storage(storage).content.delete(volid)
|
||||
while timeout:
|
||||
if not get_template(proxmox, node, storage, content_type, template):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for deleting template.')
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
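upload_template() and delete_template() share the same countdown-polling pattern: decrement a counter, sleep one second, and give up when it reaches zero. A generic sketch of that pattern, using a hypothetical helper name for readers who want to factor it out:

import time

def wait_for(condition, timeout, on_timeout):
    # Hypothetical helper mirroring the loops in upload_template() and
    # delete_template(): poll once per second until condition() is truthy
    # or the timeout (in seconds) is exhausted.
    while timeout:
        if condition():
            return True
        timeout -= 1
        if timeout == 0:
            on_timeout()
        time.sleep(1)
    return False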
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
api_host = dict(required=True),
|
||||
api_user = dict(required=True),
|
||||
api_password = dict(no_log=True),
|
||||
validate_certs = dict(type='bool', choices=BOOLEANS, default='no'),
|
||||
node = dict(),
|
||||
src = dict(),
|
||||
template = dict(),
|
||||
content_type = dict(default='vztmpl', choices=['vztmpl','iso']),
|
||||
storage = dict(default='local'),
|
||||
timeout = dict(type='int', default=30),
|
||||
force = dict(type='bool', choices=BOOLEANS, default='no'),
|
||||
state = dict(default='present', choices=['present', 'absent']),
|
||||
)
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg='proxmoxer required for this module')
|
||||
|
||||
state = module.params['state']
|
||||
api_user = module.params['api_user']
|
||||
api_host = module.params['api_host']
|
||||
api_password = module.params['api_password']
|
||||
validate_certs = module.params['validate_certs']
|
||||
node = module.params['node']
|
||||
storage = module.params['storage']
|
||||
timeout = module.params['timeout']
|
||||
|
||||
# If password not set get it from PROXMOX_PASSWORD env
|
||||
if not api_password:
|
||||
try:
|
||||
api_password = os.environ['PROXMOX_PASSWORD']
|
||||
except KeyError, e:
|
||||
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
|
||||
|
||||
try:
|
||||
proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
|
||||
except Exception, e:
|
||||
module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
|
||||
|
||||
if state == 'present':
|
||||
try:
|
||||
content_type = module.params['content_type']
|
||||
src = module.params['src']
|
||||
|
||||
from ansible import utils
|
||||
realpath = utils.path_dwim(None, src)
|
||||
template = os.path.basename(realpath)
|
||||
if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
|
||||
elif not src:
|
||||
module.fail_json(msg='src param for uploading a template file is mandatory')
|
||||
elif not (os.path.exists(realpath) and os.path.isfile(realpath)):
|
||||
module.fail_json(msg='template file at path %s does not exist' % realpath)
|
||||
|
||||
if upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout):
|
||||
module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
|
||||
except Exception, e:
|
||||
module.fail_json(msg="uploading of template %s failed with exception: %s" % ( template, e ))
|
||||
|
||||
elif state == 'absent':
|
||||
try:
|
||||
content_type = module.params['content_type']
|
||||
template = module.params['template']
|
||||
|
||||
if not template:
|
||||
module.fail_json(msg='template param is mandatory')
|
||||
elif not get_template(proxmox, node, storage, content_type, template):
|
||||
module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))
|
||||
|
||||
if delete_template(module, proxmox, node, storage, content_type, template, timeout):
|
||||
module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
|
||||
except Exception, e:
|
||||
module.fail_json(msg="deleting of template %s failed with exception: %s" % ( template, e ))
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
|
@ -60,8 +60,8 @@ requirements:
|
|||
- "libvirt-python"
|
||||
author:
|
||||
- "Ansible Core Team"
|
||||
- '"Michael DeHaan (@mpdehaan)" <michael.dehaan@gmail.com>'
|
||||
- '"Seth Vidal (@skvidal)" <skvidal@fedoraproject.org>'
|
||||
- "Michael DeHaan"
|
||||
- "Seth Vidal"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
|
0
cloud/rackspace/__init__.py
Normal file
227
cloud/rackspace/rax_mon_alarm.py
Normal file
|
@ -0,0 +1,227 @@
|
|||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_mon_alarm
|
||||
short_description: Create or delete a Rackspace Cloud Monitoring alarm.
|
||||
description:
|
||||
- Create or delete a Rackspace Cloud Monitoring alarm that associates an
|
||||
existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
|
||||
criteria that specify what conditions will trigger which levels of
|
||||
notifications. Rackspace monitoring module flow | rax_mon_entity ->
|
||||
rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
|
||||
*rax_mon_alarm*
|
||||
version_added: "2.0"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Ensure that the alarm with this C(label) exists or does not exist.
|
||||
choices: [ "present", "absent" ]
|
||||
required: false
|
||||
default: present
|
||||
label:
|
||||
description:
|
||||
- Friendly name for this alarm, used to achieve idempotence. Must be a String
|
||||
between 1 and 255 characters long.
|
||||
required: true
|
||||
entity_id:
|
||||
description:
|
||||
- ID of the entity this alarm is attached to. May be acquired by registering
|
||||
the value of a rax_mon_entity task.
|
||||
required: true
|
||||
check_id:
|
||||
description:
|
||||
- ID of the check that should be alerted on. May be acquired by registering
|
||||
the value of a rax_mon_check task.
|
||||
required: true
|
||||
notification_plan_id:
|
||||
description:
|
||||
- ID of the notification plan to trigger if this alarm fires. May be acquired
|
||||
by registering the value of a rax_mon_notification_plan task.
|
||||
required: true
|
||||
criteria:
|
||||
description:
|
||||
- Alarm DSL that describes alerting conditions and their output states. Must
|
||||
be between 1 and 16384 characters long. See
|
||||
http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html
|
||||
for a reference on the alerting language.
|
||||
disabled:
|
||||
description:
|
||||
- If yes, create this alarm, but leave it in an inactive state. Defaults to
|
||||
no.
|
||||
choices: [ "yes", "no" ]
|
||||
metadata:
|
||||
description:
|
||||
- Arbitrary key/value pairs to accompany the alarm. Must be a hash of String
|
||||
keys and values between 1 and 255 characters long.
|
||||
author: Ash Wilson
|
||||
extends_documentation_fragment: rackspace.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Alarm example
|
||||
gather_facts: False
|
||||
hosts: local
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Ensure that a specific alarm exists.
|
||||
rax_mon_alarm:
|
||||
credentials: ~/.rax_pub
|
||||
state: present
|
||||
label: uhoh
|
||||
entity_id: "{{ the_entity['entity']['id'] }}"
|
||||
check_id: "{{ the_check['check']['id'] }}"
|
||||
notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"
|
||||
criteria: >
|
||||
if (rate(metric['average']) > 10) {
|
||||
return new AlarmStatus(WARNING);
|
||||
}
|
||||
return new AlarmStatus(OK);
|
||||
register: the_alarm
|
||||
'''
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
HAS_PYRAX = False
|
||||
|
||||
def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria,
|
||||
disabled, metadata):
|
||||
|
||||
if len(label) < 1 or len(label) > 255:
|
||||
module.fail_json(msg='label must be between 1 and 255 characters long')
|
||||
|
||||
if criteria and (len(criteria) < 1 or len(criteria) > 16384):
|
||||
module.fail_json(msg='criteria must be between 1 and 16384 characters long')
|
||||
|
||||
# Coerce attributes.
|
||||
|
||||
changed = False
|
||||
alarm = None
|
||||
|
||||
cm = pyrax.cloud_monitoring
|
||||
if not cm:
|
||||
module.fail_json(msg='Failed to instantiate client. This typically '
|
||||
'indicates an invalid region or an incorrectly '
|
||||
'capitalized region name.')
|
||||
|
||||
existing = [a for a in cm.list_alarms(entity_id) if a.label == label]
|
||||
|
||||
if existing:
|
||||
alarm = existing[0]
|
||||
|
||||
if state == 'present':
|
||||
should_create = False
|
||||
should_update = False
|
||||
should_delete = False
|
||||
|
||||
if len(existing) > 1:
|
||||
module.fail_json(msg='%s existing alarms have the label %s.' %
|
||||
(len(existing), label))
|
||||
|
||||
if alarm:
|
||||
if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id:
|
||||
should_delete = should_create = True
|
||||
|
||||
should_update = (disabled and disabled != alarm.disabled) or \
|
||||
(metadata and metadata != alarm.metadata) or \
|
||||
(criteria and criteria != alarm.criteria)
|
||||
|
||||
if should_update and not should_delete:
|
||||
cm.update_alarm(entity=entity_id, alarm=alarm,
|
||||
criteria=criteria, disabled=disabled,
|
||||
label=label, metadata=metadata)
|
||||
changed = True
|
||||
|
||||
if should_delete:
|
||||
alarm.delete()
|
||||
changed = True
|
||||
else:
|
||||
should_create = True
|
||||
|
||||
if should_create:
|
||||
alarm = cm.create_alarm(entity=entity_id, check=check_id,
|
||||
notification_plan=notification_plan_id,
|
||||
criteria=criteria, disabled=disabled, label=label,
|
||||
metadata=metadata)
|
||||
changed = True
|
||||
else:
|
||||
for a in existing:
|
||||
a.delete()
|
||||
changed = True
|
||||
|
||||
if alarm:
|
||||
alarm_dict = {
|
||||
"id": alarm.id,
|
||||
"label": alarm.label,
|
||||
"check_id": alarm.check_id,
|
||||
"notification_plan_id": alarm.notification_plan_id,
|
||||
"criteria": alarm.criteria,
|
||||
"disabled": alarm.disabled,
|
||||
"metadata": alarm.metadata
|
||||
}
|
||||
module.exit_json(changed=changed, alarm=alarm_dict)
|
||||
else:
|
||||
module.exit_json(changed=changed)
|
||||
|
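The create/update/delete decision in alarm() above can be hard to follow inline; a condensed, standalone restatement of the same logic (not used by the module, attribute names taken from the code above):

def plan_actions(existing_alarm, check_id, notification_plan_id,
                 criteria, disabled, metadata):
    # Condensed restatement of the decision logic in alarm() above.
    if existing_alarm is None:
        return {'create': True, 'update': False, 'delete': False}
    # A changed check or notification plan forces delete + recreate.
    recreate = (check_id != existing_alarm.check_id or
                notification_plan_id != existing_alarm.notification_plan_id)
    # Other attribute drift is handled with an in-place update.
    update = ((disabled and disabled != existing_alarm.disabled) or
              (metadata and metadata != existing_alarm.metadata) or
              (criteria and criteria != existing_alarm.criteria))
    return {'create': recreate, 'update': update and not recreate, 'delete': recreate}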
||||
def main():
|
||||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
label=dict(required=True),
|
||||
entity_id=dict(required=True),
|
||||
check_id=dict(required=True),
|
||||
notification_plan_id=dict(required=True),
|
||||
criteria=dict(),
|
||||
disabled=dict(type='bool', default=False),
|
||||
metadata=dict(type='dict')
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
required_together=rax_required_together()
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
state = module.params.get('state')
|
||||
label = module.params.get('label')
|
||||
entity_id = module.params.get('entity_id')
|
||||
check_id = module.params.get('check_id')
|
||||
notification_plan_id = module.params.get('notification_plan_id')
|
||||
criteria = module.params.get('criteria')
|
||||
disabled = module.boolean(module.params.get('disabled'))
|
||||
metadata = module.params.get('metadata')
|
||||
|
||||
setup_rax_module(module, pyrax)
|
||||
|
||||
alarm(module, state, label, entity_id, check_id, notification_plan_id,
|
||||
criteria, disabled, metadata)
|
||||
|
||||
|
||||
# Import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.rax import *
|
||||
|
||||
# Invoke the module.
|
||||
main()
|
313
cloud/rackspace/rax_mon_check.py
Normal file
|
@ -0,0 +1,313 @@
|
|||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_mon_check
|
||||
short_description: Create or delete a Rackspace Cloud Monitoring check for an
|
||||
existing entity.
|
||||
description:
|
||||
- Create or delete a Rackspace Cloud Monitoring check associated with an
|
||||
existing rax_mon_entity. A check is a specific test or measurement that is
|
||||
performed, possibly from different monitoring zones, on the systems you
|
||||
monitor. Rackspace monitoring module flow | rax_mon_entity ->
|
||||
*rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
|
||||
rax_mon_alarm
|
||||
version_added: "2.0"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Ensure that a check with this C(label) exists or does not exist.
|
||||
choices: ["present", "absent"]
|
||||
entity_id:
|
||||
description:
|
||||
- ID of the rax_mon_entity to target with this check.
|
||||
required: true
|
||||
label:
|
||||
description:
|
||||
- Defines a label for this check, between 1 and 64 characters long.
|
||||
required: true
|
||||
check_type:
|
||||
description:
|
||||
- The type of check to create. C(remote.) checks may be created on any
|
||||
rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities
|
||||
that have a non-null C(agent_id).
|
||||
choices:
|
||||
- remote.dns
|
||||
- remote.ftp-banner
|
||||
- remote.http
|
||||
- remote.imap-banner
|
||||
- remote.mssql-banner
|
||||
- remote.mysql-banner
|
||||
- remote.ping
|
||||
- remote.pop3-banner
|
||||
- remote.postgresql-banner
|
||||
- remote.smtp-banner
|
||||
- remote.smtp
|
||||
- remote.ssh
|
||||
- remote.tcp
|
||||
- remote.telnet-banner
|
||||
- agent.filesystem
|
||||
- agent.memory
|
||||
- agent.load_average
|
||||
- agent.cpu
|
||||
- agent.disk
|
||||
- agent.network
|
||||
- agent.plugin
|
||||
required: true
|
||||
monitoring_zones_poll:
|
||||
description:
|
||||
- Comma-separated list of the names of the monitoring zones the check should
|
||||
run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon,
|
||||
mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks.
|
||||
target_hostname:
|
||||
description:
|
||||
- One of `target_hostname` and `target_alias` is required for remote.* checks,
|
||||
but prohibited for agent.* checks. The hostname this check should target.
|
||||
Must be a valid IPv4, IPv6, or FQDN.
|
||||
target_alias:
|
||||
description:
|
||||
- One of `target_alias` and `target_hostname` is required for remote.* checks,
|
||||
but prohibited for agent.* checks. Use the corresponding key in the entity's
|
||||
`ip_addresses` hash to resolve an IP address to target.
|
||||
details:
|
||||
description:
|
||||
- Additional details specific to the check type. Must be a hash of strings
|
||||
between 1 and 255 characters long, or an array or object containing 0 to
|
||||
256 items.
|
||||
disabled:
|
||||
description:
|
||||
- If "yes", ensure the check is created, but don't actually use it yet.
|
||||
choices: [ "yes", "no" ]
|
||||
metadata:
|
||||
description:
|
||||
- Hash of arbitrary key-value pairs to accompany this check if it fires.
|
||||
Keys and values must be strings between 1 and 255 characters long.
|
||||
period:
|
||||
description:
|
||||
- The number of seconds between each time the check is performed. Must be
|
||||
greater than the minimum period set on your account.
|
||||
timeout:
|
||||
description:
|
||||
- The number of seconds this check will wait when attempting to collect
|
||||
results. Must be less than the period.
|
||||
author: Ash Wilson
|
||||
extends_documentation_fragment: rackspace.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a monitoring check
|
||||
gather_facts: False
|
||||
hosts: local
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Associate a check with an existing entity.
|
||||
rax_mon_check:
|
||||
credentials: ~/.rax_pub
|
||||
state: present
|
||||
entity_id: "{{ the_entity['entity']['id'] }}"
|
||||
label: the_check
|
||||
check_type: remote.ping
|
||||
monitoring_zones_poll: mziad,mzord,mzdfw
|
||||
details:
|
||||
count: 10
|
||||
meta:
|
||||
hurf: durf
|
||||
register: the_check
|
||||
'''
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
HAS_PYRAX = False
|
||||
|
||||
def cloud_check(module, state, entity_id, label, check_type,
|
||||
monitoring_zones_poll, target_hostname, target_alias, details,
|
||||
disabled, metadata, period, timeout):
|
||||
|
||||
# Coerce attributes.
|
||||
|
||||
if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list):
|
||||
monitoring_zones_poll = [monitoring_zones_poll]
|
||||
|
||||
if period:
|
||||
period = int(period)
|
||||
|
||||
if timeout:
|
||||
timeout = int(timeout)
|
||||
|
||||
changed = False
|
||||
check = None
|
||||
|
||||
cm = pyrax.cloud_monitoring
|
||||
if not cm:
|
||||
module.fail_json(msg='Failed to instantiate client. This typically '
|
||||
'indicates an invalid region or an incorrectly '
|
||||
'capitalized region name.')
|
||||
|
||||
entity = cm.get_entity(entity_id)
|
||||
if not entity:
|
||||
module.fail_json(msg='Failed to instantiate entity. "%s" may not be'
|
||||
' a valid entity id.' % entity_id)
|
||||
|
||||
existing = [e for e in entity.list_checks() if e.label == label]
|
||||
|
||||
if existing:
|
||||
check = existing[0]
|
||||
|
||||
if state == 'present':
|
||||
if len(existing) > 1:
|
||||
module.fail_json(msg='%s existing checks have a label of %s.' %
|
||||
(len(existing), label))
|
||||
|
||||
should_delete = False
|
||||
should_create = False
|
||||
should_update = False
|
||||
|
||||
if check:
|
||||
# Details may include keys set to default values that are not
|
||||
# included in the initial creation.
|
||||
#
|
||||
# Only force a recreation of the check if one of the *specified*
|
||||
# keys is missing or has a different value.
|
||||
if details:
|
||||
for (key, value) in details.iteritems():
|
||||
if key not in check.details:
|
||||
should_delete = should_create = True
|
||||
elif value != check.details[key]:
|
||||
should_delete = should_create = True
|
||||
|
||||
should_update = label != check.label or \
|
||||
(target_hostname and target_hostname != check.target_hostname) or \
|
||||
(target_alias and target_alias != check.target_alias) or \
|
||||
(disabled != check.disabled) or \
|
||||
(metadata and metadata != check.metadata) or \
|
||||
(period and period != check.period) or \
|
||||
(timeout and timeout != check.timeout) or \
|
||||
(monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll)
|
||||
|
||||
if should_update and not should_delete:
|
||||
check.update(label=label,
|
||||
disabled=disabled,
|
||||
metadata=metadata,
|
||||
monitoring_zones_poll=monitoring_zones_poll,
|
||||
timeout=timeout,
|
||||
period=period,
|
||||
target_alias=target_alias,
|
||||
target_hostname=target_hostname)
|
||||
changed = True
|
||||
else:
|
||||
# The check doesn't exist yet.
|
||||
should_create = True
|
||||
|
||||
if should_delete:
|
||||
check.delete()
|
||||
|
||||
if should_create:
|
||||
check = cm.create_check(entity,
|
||||
label=label,
|
||||
check_type=check_type,
|
||||
target_hostname=target_hostname,
|
||||
target_alias=target_alias,
|
||||
monitoring_zones_poll=monitoring_zones_poll,
|
||||
details=details,
|
||||
disabled=disabled,
|
||||
metadata=metadata,
|
||||
period=period,
|
||||
timeout=timeout)
|
||||
changed = True
|
||||
elif state == 'absent':
|
||||
if check:
|
||||
check.delete()
|
||||
changed = True
|
||||
else:
|
||||
module.fail_json(msg='state must be either present or absent.')
|
||||
|
||||
if check:
|
||||
check_dict = {
|
||||
"id": check.id,
|
||||
"label": check.label,
|
||||
"type": check.type,
|
||||
"target_hostname": check.target_hostname,
|
||||
"target_alias": check.target_alias,
|
||||
"monitoring_zones_poll": check.monitoring_zones_poll,
|
||||
"details": check.details,
|
||||
"disabled": check.disabled,
|
||||
"metadata": check.metadata,
|
||||
"period": check.period,
|
||||
"timeout": check.timeout
|
||||
}
|
||||
module.exit_json(changed=changed, check=check_dict)
|
||||
else:
|
||||
module.exit_json(changed=changed)
|
||||
|
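The details comparison in cloud_check() above only looks at keys the caller actually specified, so defaults filled in by the API do not force a recreation. A standalone sketch of that comparison (hypothetical helper and values):

def details_force_recreate(specified, current):
    # Condensed restatement of the details comparison in cloud_check():
    # only keys explicitly specified by the caller are compared, so
    # server-populated defaults in the existing check are ignored.
    return any(key not in current or current[key] != value
               for key, value in (specified or {}).items())

# A default added by the API does not trigger recreation.
print(details_force_recreate({'count': 10}, {'count': 10, 'timeout': 60}))  # False
# A changed specified value does.
print(details_force_recreate({'count': 15}, {'count': 10, 'timeout': 60}))  # True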
||||
def main():
|
||||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
entity_id=dict(required=True),
|
||||
label=dict(required=True),
|
||||
check_type=dict(required=True),
|
||||
monitoring_zones_poll=dict(),
|
||||
target_hostname=dict(),
|
||||
target_alias=dict(),
|
||||
details=dict(type='dict', default={}),
|
||||
disabled=dict(type='bool', default=False),
|
||||
metadata=dict(type='dict', default={}),
|
||||
period=dict(type='int'),
|
||||
timeout=dict(type='int'),
|
||||
state=dict(default='present', choices=['present', 'absent'])
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
required_together=rax_required_together()
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
entity_id = module.params.get('entity_id')
|
||||
label = module.params.get('label')
|
||||
check_type = module.params.get('check_type')
|
||||
monitoring_zones_poll = module.params.get('monitoring_zones_poll')
|
||||
target_hostname = module.params.get('target_hostname')
|
||||
target_alias = module.params.get('target_alias')
|
||||
details = module.params.get('details')
|
||||
disabled = module.boolean(module.params.get('disabled'))
|
||||
metadata = module.params.get('metadata')
|
||||
period = module.params.get('period')
|
||||
timeout = module.params.get('timeout')
|
||||
|
||||
state = module.params.get('state')
|
||||
|
||||
setup_rax_module(module, pyrax)
|
||||
|
||||
cloud_check(module, state, entity_id, label, check_type,
|
||||
monitoring_zones_poll, target_hostname, target_alias, details,
|
||||
disabled, metadata, period, timeout)
|
||||
|
||||
|
||||
# Import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.rax import *
|
||||
|
||||
# Invoke the module.
|
||||
main()
|
192
cloud/rackspace/rax_mon_entity.py
Normal file
|
@ -0,0 +1,192 @@
|
|||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_mon_entity
|
||||
short_description: Create or delete a Rackspace Cloud Monitoring entity
|
||||
description:
|
||||
- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
|
||||
to monitor. Entities associate checks and alarms with a target system and
|
||||
provide a convenient, centralized place to store IP addresses. Rackspace
|
||||
monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
|
||||
rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
|
||||
version_added: "2.0"
|
||||
options:
|
||||
label:
|
||||
description:
|
||||
- Defines a name for this entity. Must be a non-empty string between 1 and
|
||||
255 characters long.
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Ensure that an entity with this C(label) exists or does not exist.
|
||||
choices: ["present", "absent"]
|
||||
agent_id:
|
||||
description:
|
||||
- Rackspace monitoring agent on the target device to which this entity is
|
||||
bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
|
||||
named_ip_addresses:
|
||||
description:
|
||||
- Hash of IP addresses that may be referenced by name by rax_mon_checks
|
||||
added to this entity. Must be a dictionary with keys that are names
|
||||
between 1 and 64 characters long, and values that are valid IPv4 or IPv6
|
||||
addresses.
|
||||
metadata:
|
||||
description:
|
||||
- Hash of arbitrary C(name), C(value) pairs that are passed to associated
|
||||
rax_mon_alarms. Names and values must all be between 1 and 255 characters
|
||||
long.
|
||||
author: Ash Wilson
|
||||
extends_documentation_fragment: rackspace.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Entity example
|
||||
gather_facts: False
|
||||
hosts: local
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Ensure an entity exists
|
||||
rax_mon_entity:
|
||||
credentials: ~/.rax_pub
|
||||
state: present
|
||||
label: my_entity
|
||||
named_ip_addresses:
|
||||
web_box: 192.168.0.10
|
||||
db_box: 192.168.0.11
|
||||
meta:
|
||||
hurf: durf
|
||||
register: the_entity
|
||||
'''
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
HAS_PYRAX = False
|
||||
|
||||
def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
|
||||
metadata):
|
||||
|
||||
if len(label) < 1 or len(label) > 255:
|
||||
module.fail_json(msg='label must be between 1 and 255 characters long')
|
||||
|
||||
changed = False
|
||||
|
||||
cm = pyrax.cloud_monitoring
|
||||
if not cm:
|
||||
module.fail_json(msg='Failed to instantiate client. This typically '
|
||||
'indicates an invalid region or an incorrectly '
|
||||
'capitalized region name.')
|
||||
|
||||
existing = []
|
||||
for entity in cm.list_entities():
|
||||
if label == entity.label:
|
||||
existing.append(entity)
|
||||
|
||||
entity = None
|
||||
|
||||
if existing:
|
||||
entity = existing[0]
|
||||
|
||||
if state == 'present':
|
||||
should_update = False
|
||||
should_delete = False
|
||||
should_create = False
|
||||
|
||||
if len(existing) > 1:
|
||||
module.fail_json(msg='%s existing entities have the label %s.' %
|
||||
(len(existing), label))
|
||||
|
||||
if entity:
|
||||
if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
|
||||
should_delete = should_create = True
|
||||
|
||||
# Change an existing Entity, unless there's nothing to do.
|
||||
should_update = agent_id and agent_id != entity.agent_id or \
|
||||
(metadata and metadata != entity.metadata)
|
||||
|
||||
if should_update and not should_delete:
|
||||
entity.update(agent_id, metadata)
|
||||
changed = True
|
||||
|
||||
if should_delete:
|
||||
entity.delete()
|
||||
else:
|
||||
should_create = True
|
||||
|
||||
if should_create:
|
||||
# Create a new Entity.
|
||||
entity = cm.create_entity(label=label, agent=agent_id,
|
||||
ip_addresses=named_ip_addresses,
|
||||
metadata=metadata)
|
||||
changed = True
|
||||
else:
|
||||
# Delete the existing Entities.
|
||||
for e in existing:
|
||||
e.delete()
|
||||
changed = True
|
||||
|
||||
if entity:
|
||||
entity_dict = {
|
||||
"id": entity.id,
|
||||
"name": entity.name,
|
||||
"agent_id": entity.agent_id,
|
||||
}
|
||||
module.exit_json(changed=changed, entity=entity_dict)
|
||||
else:
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
def main():
|
||||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
label=dict(required=True),
|
||||
agent_id=dict(),
|
||||
named_ip_addresses=dict(type='dict', default={}),
|
||||
metadata=dict(type='dict', default={})
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
required_together=rax_required_together()
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
state = module.params.get('state')
|
||||
|
||||
label = module.params.get('label')
|
||||
agent_id = module.params.get('agent_id')
|
||||
named_ip_addresses = module.params.get('named_ip_addresses')
|
||||
metadata = module.params.get('metadata')
|
||||
|
||||
setup_rax_module(module, pyrax)
|
||||
|
||||
cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata)
|
||||
|
||||
# Import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.rax import *
|
||||
|
||||
# Invoke the module.
|
||||
main()
|
176
cloud/rackspace/rax_mon_notification.py
Normal file
|
@ -0,0 +1,176 @@
|
|||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_mon_notification
|
||||
short_description: Create or delete a Rackspace Cloud Monitoring notification.
|
||||
description:
|
||||
- Create or delete a Rackspace Cloud Monitoring notification that specifies a
|
||||
channel that can be used to communicate alarms, such as email, webhooks, or
|
||||
PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
|
||||
*rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
|
||||
version_added: "2.0"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Ensure that the notification with this C(label) exists or does not exist.
|
||||
choices: ['present', 'absent']
|
||||
label:
|
||||
description:
|
||||
- Defines a friendly name for this notification. String between 1 and 255
|
||||
characters long.
|
||||
required: true
|
||||
notification_type:
|
||||
description:
|
||||
- A supported notification type.
|
||||
choices: ["webhook", "email", "pagerduty"]
|
||||
required: true
|
||||
details:
|
||||
description:
|
||||
- Dictionary of key-value pairs used to initialize the notification.
|
||||
Required keys and meanings vary with notification type. See
|
||||
http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
|
||||
service-notification-types-crud.html for details.
|
||||
required: true
|
||||
author: Ash Wilson
|
||||
extends_documentation_fragment: rackspace.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Monitoring notification example
|
||||
gather_facts: False
|
||||
hosts: local
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Email me when something goes wrong.
|
||||
rax_mon_notification:
|
||||
credentials: ~/.rax_pub
|
||||
label: omg
|
||||
notification_type: email
|
||||
details:
|
||||
address: me@mailhost.com
|
||||
register: the_notification
|
||||
'''
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
HAS_PYRAX = False
|
||||
|
||||
def notification(module, state, label, notification_type, details):
|
||||
|
||||
if len(label) < 1 or len(label) > 255:
|
||||
module.fail_json(msg='label must be between 1 and 255 characters long')
|
||||
|
||||
changed = False
|
||||
notification = None
|
||||
|
||||
cm = pyrax.cloud_monitoring
|
||||
if not cm:
|
||||
module.fail_json(msg='Failed to instantiate client. This typically '
|
||||
'indicates an invalid region or an incorrectly '
|
||||
'capitalized region name.')
|
||||
|
||||
existing = []
|
||||
for n in cm.list_notifications():
|
||||
if n.label == label:
|
||||
existing.append(n)
|
||||
|
||||
if existing:
|
||||
notification = existing[0]
|
||||
|
||||
if state == 'present':
|
||||
should_update = False
|
||||
should_delete = False
|
||||
should_create = False
|
||||
|
||||
if len(existing) > 1:
|
||||
module.fail_json(msg='%s existing notifications are labelled %s.' %
|
||||
(len(existing), label))
|
||||
|
||||
if notification:
|
||||
should_delete = (notification_type != notification.type)
|
||||
|
||||
should_update = (details != notification.details)
|
||||
|
||||
if should_update and not should_delete:
|
||||
notification.update(details=details)
|
||||
changed = True
|
||||
|
||||
if should_delete:
|
||||
notification.delete()
|
||||
else:
|
||||
should_create = True
|
||||
|
||||
if should_create:
|
||||
notification = cm.create_notification(notification_type,
|
||||
label=label, details=details)
|
||||
changed = True
|
||||
else:
|
||||
for n in existing:
|
||||
n.delete()
|
||||
changed = True
|
||||
|
||||
if notification:
|
||||
notification_dict = {
|
||||
"id": notification.id,
|
||||
"type": notification.type,
|
||||
"label": notification.label,
|
||||
"details": notification.details
|
||||
}
|
||||
module.exit_json(changed=changed, notification=notification_dict)
|
||||
else:
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
def main():
|
||||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
label=dict(required=True),
|
||||
notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
|
||||
details=dict(required=True, type='dict')
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
required_together=rax_required_together()
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
state = module.params.get('state')
|
||||
|
||||
label = module.params.get('label')
|
||||
notification_type = module.params.get('notification_type')
|
||||
details = module.params.get('details')
|
||||
|
||||
setup_rax_module(module, pyrax)
|
||||
|
||||
notification(module, state, label, notification_type, details)
|
||||
|
||||
# Import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.rax import *
|
||||
|
||||
# Invoke the module.
|
||||
main()
|
181
cloud/rackspace/rax_mon_notification_plan.py
Normal file
|
@ -0,0 +1,181 @@
|
|||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_mon_notification_plan
|
||||
short_description: Create or delete a Rackspace Cloud Monitoring notification
|
||||
plan.
|
||||
description:
|
||||
- Create or delete a Rackspace Cloud Monitoring notification plan by
|
||||
associating existing rax_mon_notifications with severity levels. Rackspace
|
||||
monitoring module flow | rax_mon_entity -> rax_mon_check ->
|
||||
rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
|
||||
version_added: "2.0"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Ensure that the notification plan with this C(label) exists or does not
|
||||
exist.
|
||||
choices: ['present', 'absent']
|
||||
label:
|
||||
description:
|
||||
- Defines a friendly name for this notification plan. String between 1 and
|
||||
255 characters long.
|
||||
required: true
|
||||
critical_state:
|
||||
description:
|
||||
- Notification list to use when the alarm state is CRITICAL. Must be an
|
||||
array of valid rax_mon_notification ids.
|
||||
warning_state:
|
||||
description:
|
||||
- Notification list to use when the alarm state is WARNING. Must be an array
|
||||
of valid rax_mon_notification ids.
|
||||
ok_state:
|
||||
description:
|
||||
- Notification list to use when the alarm state is OK. Must be an array of
|
||||
valid rax_mon_notification ids.
|
||||
author: Ash Wilson
|
||||
extends_documentation_fragment: rackspace.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Example notification plan
|
||||
gather_facts: False
|
||||
hosts: local
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Establish who gets called when.
|
||||
rax_mon_notification_plan:
|
||||
credentials: ~/.rax_pub
|
||||
state: present
|
||||
label: defcon1
|
||||
critical_state:
|
||||
- "{{ everyone['notification']['id'] }}"
|
||||
warning_state:
|
||||
- "{{ opsfloor['notification']['id'] }}"
|
||||
register: defcon1
|
||||
'''
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
HAS_PYRAX = False
|
||||
|
||||
def notification_plan(module, state, label, critical_state, warning_state, ok_state):
|
||||
|
||||
if len(label) < 1 or len(label) > 255:
|
||||
module.fail_json(msg='label must be between 1 and 255 characters long')
|
||||
|
||||
changed = False
|
||||
notification_plan = None
|
||||
|
||||
cm = pyrax.cloud_monitoring
|
||||
if not cm:
|
||||
module.fail_json(msg='Failed to instantiate client. This typically '
|
||||
'indicates an invalid region or an incorrectly '
|
||||
'capitalized region name.')
|
||||
|
||||
existing = []
|
||||
for n in cm.list_notification_plans():
|
||||
if n.label == label:
|
||||
existing.append(n)
|
||||
|
||||
if existing:
|
||||
notification_plan = existing[0]
|
||||
|
||||
if state == 'present':
|
||||
should_create = False
|
||||
should_delete = False
|
||||
|
||||
if len(existing) > 1:
|
||||
module.fail_json(msg='%s notification plans are labelled %s.' %
|
||||
(len(existing), label))
|
||||
|
||||
if notification_plan:
|
||||
should_delete = (critical_state and critical_state != notification_plan.critical_state) or \
|
||||
(warning_state and warning_state != notification_plan.warning_state) or \
|
||||
(ok_state and ok_state != notification_plan.ok_state)
|
||||
|
||||
if should_delete:
|
||||
notification_plan.delete()
|
||||
should_create = True
|
||||
else:
|
||||
should_create = True
|
||||
|
||||
if should_create:
|
||||
notification_plan = cm.create_notification_plan(label=label,
|
||||
critical_state=critical_state,
|
||||
warning_state=warning_state,
|
||||
ok_state=ok_state)
|
||||
changed = True
|
||||
else:
|
||||
for np in existing:
|
||||
np.delete()
|
||||
changed = True
|
||||
|
||||
if notification_plan:
|
||||
notification_plan_dict = {
|
||||
"id": notification_plan.id,
|
||||
"critical_state": notification_plan.critical_state,
|
||||
"warning_state": notification_plan.warning_state,
|
||||
"ok_state": notification_plan.ok_state,
|
||||
"metadata": notification_plan.metadata
|
||||
}
|
||||
module.exit_json(changed=changed, notification_plan=notification_plan_dict)
|
||||
else:
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
def main():
|
||||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
label=dict(required=True),
|
||||
critical_state=dict(type='list'),
|
||||
warning_state=dict(type='list'),
|
||||
ok_state=dict(type='list')
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
required_together=rax_required_together()
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
state = module.params.get('state')
|
||||
|
||||
label = module.params.get('label')
|
||||
critical_state = module.params.get('critical_state')
|
||||
warning_state = module.params.get('warning_state')
|
||||
ok_state = module.params.get('ok_state')
|
||||
|
||||
setup_rax_module(module, pyrax)
|
||||
|
||||
notification_plan(module, state, label, critical_state, warning_state, ok_state)
|
||||
|
||||
# Import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.rax import *
|
||||
|
||||
# Invoke the module.
|
||||
main()
|
|
@ -25,7 +25,7 @@ short_description: Manage VMware vSphere Datacenters
|
|||
description:
|
||||
- Manage VMware vSphere Datacenters
|
||||
version_added: 2.0
|
||||
author: '"Joseph Callen (@jcpowermac)" <jcallen () csc.com>'
|
||||
author: "Joseph Callen (@jcpowermac)"
|
||||
notes:
|
||||
- Tested on vSphere 5.5
|
||||
requirements:
|
||||
|
|
151
cloud/vmware/vsphere_copy.py
Normal file
|
@ -0,0 +1,151 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2015 Dag Wieers <dag@wieers.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vsphere_copy
|
||||
short_description: Copy a file to a vCenter datastore
|
||||
description: Upload files to a vCenter datastore
|
||||
version_added: 2.0
|
||||
author: Dag Wieers (@dagwieers) <dag@wieers.com>
|
||||
options:
|
||||
host:
|
||||
description:
|
||||
- The vCenter server on which the datastore is available.
|
||||
required: true
|
||||
login:
|
||||
description:
|
||||
- The login name to authenticate on the vCenter server.
|
||||
required: true
|
||||
password:
|
||||
description:
|
||||
- The password to authenticate on the vCenter server.
|
||||
required: true
|
||||
src:
|
||||
description:
|
||||
- The file to push to vCenter
|
||||
required: true
|
||||
datacenter:
|
||||
description:
|
||||
- The datacenter on the vCenter server that holds the datastore.
|
||||
required: true
|
||||
datastore:
|
||||
description:
|
||||
- The datastore on the vCenter server to push files to.
|
||||
required: true
|
||||
path:
|
||||
description:
|
||||
- The file to push to the datastore on the vCenter server.
|
||||
required: true
|
||||
notes:
|
||||
- "This module ought to be run from a system that can access vCenter directly and has the file to transfer.
|
||||
It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)."
|
||||
- Tested on vSphere 5.5
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- vsphere_copy: host=vhost login=vuser password=vpass src=/some/local/file datacenter='DC1 Someplace' datastore=datastore1 path=some/remote/file
|
||||
transport: local
|
||||
- vsphere_copy: host=vhost login=vuser password=vpass src=/other/local/file datacenter='DC2 Someplace' datastore=datastore2 path=other/remote/file
|
||||
delegate_to: other_system
|
||||
'''
|
||||
|
||||
import atexit
|
||||
import base64
|
||||
import httplib
|
||||
import urllib
|
||||
import mmap
|
||||
import errno
|
||||
import socket
|
||||
|
||||
def vmware_path(datastore, datacenter, path):
|
||||
''' Constructs a URL path that VSphere accepts reliably '''
|
||||
path = "/folder/%s" % path.lstrip("/")
|
||||
if not path.startswith("/"):
|
||||
path = "/" + path
|
||||
params = dict( dsName = datastore )
|
||||
if datacenter:
|
||||
params["dcPath"] = datacenter
|
||||
params = urllib.urlencode(params)
|
||||
return "%s?%s" % (path, params)
|
||||
|
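A quick illustration of the path vmware_path() builds (hypothetical datastore and datacenter names; query-parameter order may vary because it comes from a dict):

# Example output of vmware_path() with made-up arguments.
print(vmware_path("datastore1", "DC1 Someplace", "some/remote/file"))
# -> /folder/some/remote/file?dsName=datastore1&dcPath=DC1+Someplace
#    (query parameter order may differ)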
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
host = dict(required=True, aliases=[ 'hostname' ]),
|
||||
login = dict(required=True, aliases=[ 'username' ]),
|
||||
password = dict(required=True),
|
||||
src = dict(required=True, aliases=[ 'name' ]),
|
||||
datacenter = dict(required=True),
|
||||
datastore = dict(required=True),
|
||||
dest = dict(required=True, aliases=[ 'path' ]),
|
||||
),
|
||||
# Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable
|
||||
supports_check_mode = False,
|
||||
)
|
||||
|
||||
host = module.params.get('host')
|
||||
login = module.params.get('login')
|
||||
password = module.params.get('password')
|
||||
src = module.params.get('src')
|
||||
datacenter = module.params.get('datacenter')
|
||||
datastore = module.params.get('datastore')
|
||||
dest = module.params.get('dest')
|
||||
|
||||
fd = open(src, "rb")
|
||||
atexit.register(fd.close)
|
||||
|
||||
data = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ)
|
||||
atexit.register(data.close)
|
||||
|
||||
conn = httplib.HTTPSConnection(host)
|
||||
atexit.register(conn.close)
|
||||
|
||||
remote_path = vmware_path(datastore, datacenter, dest)
|
||||
auth = base64.encodestring('%s:%s' % (login, password)).rstrip()
|
||||
headers = {
|
||||
"Content-Type": "application/octet-stream",
|
||||
"Content-Length": str(len(data)),
|
||||
"Authorization": "Basic %s" % auth,
|
||||
}
|
||||
|
||||
# URL is only used in JSON output (helps troubleshooting)
|
||||
url = 'https://%s%s' % (host, remote_path)
|
||||
|
||||
try:
|
||||
conn.request("PUT", remote_path, body=data, headers=headers)
|
||||
except socket.error, e:
|
||||
if isinstance(e.args, tuple) and e[0] == errno.ECONNRESET:
|
||||
# VSphere resets connection if the file is in use and cannot be replaced
|
||||
module.fail_json(msg='Failed to upload, image probably in use', status=e[0], reason=str(e), url=url)
|
||||
else:
|
||||
module.fail_json(msg=str(e), status=e[0], reason=str(e), url=url)
|
||||
|
||||
resp = conn.getresponse()
|
||||
|
||||
if resp.status in range(200, 300):
|
||||
module.exit_json(changed=True, status=resp.status, reason=resp.reason, url=url)
|
||||
else:
|
||||
module.fail_json(msg='Failed to upload', status=resp.status, reason=resp.reason, length=resp.length, version=resp.version, headers=resp.getheaders(), chunked=resp.chunked, url=url)
|
||||
|
||||
# this is magic, see lib/ansible/module_common.py
|
||||
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
|
||||
main()
|
0
cloud/webfaction/__init__.py
Normal file
180
cloud/webfaction/webfaction_app.py
Normal file
|
@ -0,0 +1,180 @@
|
|||
#! /usr/bin/python
|
||||
#
|
||||
# Create a Webfaction application using Ansible and the Webfaction API
|
||||
#
|
||||
# Valid application types can be found by looking here:
|
||||
# http://docs.webfaction.com/xmlrpc-api/apps.html#application-types
|
||||
#
|
||||
# ------------------------------------------
|
||||
#
|
||||
# (c) Quentin Stafford-Fraser 2015
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: webfaction_app
|
||||
short_description: Add or remove applications on a Webfaction host
|
||||
description:
|
||||
- Add or remove applications on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
|
||||
author: Quentin Stafford-Fraser (@quentinsf)
|
||||
version_added: "2.0"
|
||||
notes:
|
||||
- "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
|
||||
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the application
|
||||
required: true
|
||||
|
||||
state:
|
||||
description:
|
||||
- Whether the application should exist
|
||||
required: false
|
||||
choices: ['present', 'absent']
|
||||
default: "present"
|
||||
|
||||
type:
|
||||
description:
|
||||
- The type of application to create. See the Webfaction docs at http://docs.webfaction.com/xmlrpc-api/apps.html for a list.
|
||||
required: true
|
||||
|
||||
autostart:
|
||||
description:
|
||||
- Whether the app should restart with an autostart.cgi script
|
||||
required: false
|
||||
default: "no"
|
||||
|
||||
extra_info:
|
||||
description:
|
||||
- Any extra parameters required by the app
|
||||
required: false
|
||||
default: null
|
||||
|
||||
open_port:
|
||||
required: false
|
||||
default: false
|
||||
|
||||
login_name:
|
||||
description:
|
||||
- The webfaction account to use
|
||||
required: true
|
||||
|
||||
login_password:
|
||||
description:
|
||||
- The webfaction password to use
|
||||
required: true
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a test app
|
||||
webfaction_app:
|
||||
name="my_wsgi_app1"
|
||||
state=present
|
||||
type=mod_wsgi35-python27
|
||||
login_name={{webfaction_user}}
|
||||
login_password={{webfaction_passwd}}
|
||||
'''
|
||||
|
||||
import xmlrpclib
|
||||
|
||||
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
name = dict(required=True),
|
||||
state = dict(required=False, choices=['present', 'absent'], default='present'),
|
||||
type = dict(required=True),
|
||||
autostart = dict(required=False, choices=BOOLEANS, default=False),
|
||||
extra_info = dict(required=False, default=""),
|
||||
port_open = dict(required=False, choices=BOOLEANS, default=False),
|
||||
login_name = dict(required=True),
|
||||
login_password = dict(required=True),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
app_name = module.params['name']
|
||||
app_type = module.params['type']
|
||||
app_state = module.params['state']
|
||||
|
||||
session_id, account = webfaction.login(
|
||||
module.params['login_name'],
|
||||
module.params['login_password']
|
||||
)
|
||||
|
||||
app_list = webfaction.list_apps(session_id)
|
||||
app_map = dict([(i['name'], i) for i in app_list])
|
||||
existing_app = app_map.get(app_name)
|
||||
|
||||
result = {}
|
||||
|
||||
# Here's where the real stuff happens
|
||||
|
||||
if app_state == 'present':
|
||||
|
||||
# Does an app with this name already exist?
|
||||
if existing_app:
|
||||
if existing_app['type'] != app_type:
|
||||
module.fail_json(msg="App already exists with different type. Please fix by hand.")
|
||||
|
||||
# If it exists with the right type, we don't change it
|
||||
# Should check other parameters.
|
||||
module.exit_json(
|
||||
changed = False,
|
||||
)
|
||||
|
||||
if not module.check_mode:
|
||||
# If this isn't a dry run, create the app
|
||||
result.update(
|
||||
webfaction.create_app(
|
||||
session_id, app_name, app_type,
|
||||
module.boolean(module.params['autostart']),
|
||||
module.params['extra_info'],
|
||||
module.boolean(module.params['port_open'])
|
||||
)
|
||||
)
|
||||
|
||||
elif app_state == 'absent':
|
||||
|
||||
# If the app's already not there, nothing changed.
|
||||
if not existing_app:
|
||||
module.exit_json(
|
||||
changed = False,
|
||||
)
|
||||
|
||||
if not module.check_mode:
|
||||
# If this isn't a dry run, delete the app
|
||||
result.update(
|
||||
webfaction.delete_app(session_id, app_name)
|
||||
)
|
||||
|
||||
else:
|
||||
module.fail_json(msg="Unknown state specified: {}".format(app_state))
|
||||
|
||||
|
||||
module.exit_json(
|
||||
changed = True,
|
||||
result = result
|
||||
)
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
||||
|
184
cloud/webfaction/webfaction_db.py
Normal file
184
cloud/webfaction/webfaction_db.py
Normal file
|
@ -0,0 +1,184 @@
|
|||
#! /usr/bin/python
|
||||
#
|
||||
# Create a webfaction database using Ansible and the Webfaction API
|
||||
#
|
||||
# ------------------------------------------
|
||||
#
|
||||
# (c) Quentin Stafford-Fraser and Andy Baker 2015
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: webfaction_db
|
||||
short_description: Add or remove a database on Webfaction
|
||||
description:
|
||||
- Add or remove a database on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
|
||||
author: Quentin Stafford-Fraser (@quentinsf)
|
||||
version_added: "2.0"
|
||||
notes:
|
||||
- "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
|
||||
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
|
||||
options:
|
||||
|
||||
name:
|
||||
description:
|
||||
- The name of the database
|
||||
required: true
|
||||
|
||||
state:
|
||||
description:
|
||||
- Whether the database should exist
|
||||
required: false
|
||||
choices: ['present', 'absent']
|
||||
default: "present"
|
||||
|
||||
type:
|
||||
description:
|
||||
- The type of database to create.
|
||||
required: true
|
||||
choices: ['mysql', 'postgresql']
|
||||
|
||||
password:
|
||||
description:
|
||||
- The password for the new database user.
|
||||
required: false
|
||||
default: None
|
||||
|
||||
login_name:
|
||||
description:
|
||||
- The webfaction account to use
|
||||
required: true
|
||||
|
||||
login_password:
|
||||
description:
|
||||
- The webfaction password to use
|
||||
required: true
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# This will also create a default DB user with the same
|
||||
# name as the database, and the specified password.
|
||||
|
||||
- name: Create a database
|
||||
webfaction_db:
|
||||
name: "{{webfaction_user}}_db1"
|
||||
password: mytestsql
|
||||
type: mysql
|
||||
login_name: "{{webfaction_user}}"
|
||||
login_password: "{{webfaction_passwd}}"
|
||||
|
||||
# Note that, for symmetry's sake, deleting a database using
|
||||
# 'state: absent' will also delete the matching user.
|
||||
|
||||
'''
|
||||
|
||||
import socket
|
||||
import xmlrpclib
|
||||
|
||||
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
name = dict(required=True),
|
||||
state = dict(required=False, choices=['present', 'absent'], default='present'),
|
||||
# You can specify an IP address or hostname.
|
||||
type = dict(required=True),
|
||||
password = dict(required=False, default=None),
|
||||
login_name = dict(required=True),
|
||||
login_password = dict(required=True),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
db_name = module.params['name']
|
||||
db_state = module.params['state']
|
||||
db_type = module.params['type']
|
||||
db_passwd = module.params['password']
|
||||
|
||||
session_id, account = webfaction.login(
|
||||
module.params['login_name'],
|
||||
module.params['login_password']
|
||||
)
|
||||
|
||||
db_list = webfaction.list_dbs(session_id)
|
||||
db_map = dict([(i['name'], i) for i in db_list])
|
||||
existing_db = db_map.get(db_name)
|
||||
|
||||
user_list = webfaction.list_db_users(session_id)
|
||||
user_map = dict([(i['username'], i) for i in user_list])
|
||||
existing_user = user_map.get(db_name)
|
||||
|
||||
result = {}
|
||||
|
||||
# Here's where the real stuff happens
|
||||
|
||||
if db_state == 'present':
|
||||
|
||||
# Does an database with this name already exist?
|
||||
if existing_db:
|
||||
# Yes, but of a different type - fail
|
||||
if existing_db['db_type'] != db_type:
|
||||
module.fail_json(msg="Database already exists but is a different type. Please fix by hand.")
|
||||
|
||||
# If it exists with the right type, we don't change anything.
|
||||
module.exit_json(
|
||||
changed = False,
|
||||
)
|
||||
|
||||
|
||||
if not module.check_mode:
|
||||
# If this isn't a dry run, create the db
|
||||
# and default user.
|
||||
result.update(
|
||||
webfaction.create_db(
|
||||
session_id, db_name, db_type, db_passwd
|
||||
)
|
||||
)
|
||||
|
||||
elif db_state == 'absent':
|
||||
|
||||
# If this isn't a dry run...
|
||||
if not module.check_mode:
|
||||
|
||||
if not (existing_db or existing_user):
|
||||
module.exit_json(changed = False,)
|
||||
|
||||
if existing_db:
|
||||
# Delete the db if it exists
|
||||
result.update(
|
||||
webfaction.delete_db(session_id, db_name, db_type)
|
||||
)
|
||||
|
||||
if existing_user:
|
||||
# Delete the default db user if it exists
|
||||
result.update(
|
||||
webfaction.delete_db_user(session_id, db_name, db_type)
|
||||
)
|
||||
|
||||
else:
|
||||
module.fail_json(msg="Unknown state specified: {}".format(db_state))
|
||||
|
||||
module.exit_json(
|
||||
changed = True,
|
||||
result = result
|
||||
)
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
||||
|
171
cloud/webfaction/webfaction_domain.py
Normal file
171
cloud/webfaction/webfaction_domain.py
Normal file
|
@ -0,0 +1,171 @@
|
|||
#! /usr/bin/python
|
||||
#
|
||||
# Create Webfaction domains and subdomains using Ansible and the Webfaction API
|
||||
#
|
||||
# ------------------------------------------
|
||||
#
|
||||
# (c) Quentin Stafford-Fraser 2015
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: webfaction_domain
|
||||
short_description: Add or remove domains and subdomains on Webfaction
|
||||
description:
|
||||
- Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
|
||||
author: Quentin Stafford-Fraser (@quentinsf)
|
||||
version_added: "2.0"
|
||||
notes:
|
||||
- If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted.
|
||||
- "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
|
||||
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
|
||||
|
||||
options:
|
||||
|
||||
name:
|
||||
description:
|
||||
- The name of the domain
|
||||
required: true
|
||||
|
||||
state:
|
||||
description:
|
||||
- Whether the domain should exist
|
||||
required: false
|
||||
choices: ['present', 'absent']
|
||||
default: "present"
|
||||
|
||||
subdomains:
|
||||
description:
|
||||
- Any subdomains to create.
|
||||
required: false
|
||||
default: null
|
||||
|
||||
login_name:
|
||||
description:
|
||||
- The webfaction account to use
|
||||
required: true
|
||||
|
||||
login_password:
|
||||
description:
|
||||
- The webfaction password to use
|
||||
required: true
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a test domain
|
||||
webfaction_domain:
|
||||
name: mydomain.com
|
||||
state: present
|
||||
subdomains:
|
||||
- www
|
||||
- blog
|
||||
login_name: "{{webfaction_user}}"
|
||||
login_password: "{{webfaction_passwd}}"
|
||||
|
||||
- name: Delete test domain and any subdomains
|
||||
webfaction_domain:
|
||||
name: mydomain.com
|
||||
state: absent
|
||||
login_name: "{{webfaction_user}}"
|
||||
login_password: "{{webfaction_passwd}}"
|
||||
|
||||
'''
|
||||
|
||||
import socket
|
||||
import xmlrpclib
|
||||
|
||||
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
name = dict(required=True),
|
||||
state = dict(required=False, choices=['present', 'absent'], default='present'),
|
||||
subdomains = dict(required=False, default=[]),
|
||||
login_name = dict(required=True),
|
||||
login_password = dict(required=True),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
domain_name = module.params['name']
|
||||
domain_state = module.params['state']
|
||||
domain_subdomains = module.params['subdomains']
|
||||
|
||||
session_id, account = webfaction.login(
|
||||
module.params['login_name'],
|
||||
module.params['login_password']
|
||||
)
|
||||
|
||||
domain_list = webfaction.list_domains(session_id)
|
||||
domain_map = dict([(i['domain'], i) for i in domain_list])
|
||||
existing_domain = domain_map.get(domain_name)
|
||||
|
||||
result = {}
|
||||
|
||||
# Here's where the real stuff happens
|
||||
|
||||
if domain_state == 'present':
|
||||
|
||||
# Does an app with this name already exist?
|
||||
if existing_domain:
|
||||
|
||||
if set(existing_domain['subdomains']) >= set(domain_subdomains):
|
||||
# If it exists with the right subdomains, we don't change anything.
|
||||
module.exit_json(
|
||||
changed = False,
|
||||
)
|
||||
|
||||
positional_args = [session_id, domain_name] + domain_subdomains
|
||||
|
||||
if not module.check_mode:
|
||||
# If this isn't a dry run, create the app
|
||||
# print positional_args
|
||||
result.update(
|
||||
webfaction.create_domain(
|
||||
*positional_args
|
||||
)
|
||||
)
|
||||
|
||||
elif domain_state == 'absent':
|
||||
|
||||
# If the app's already not there, nothing changed.
|
||||
if not existing_domain:
|
||||
module.exit_json(
|
||||
changed = False,
|
||||
)
|
||||
|
||||
positional_args = [session_id, domain_name] + domain_subdomains
|
||||
|
||||
if not module.check_mode:
|
||||
# If this isn't a dry run, delete the app
|
||||
result.update(
|
||||
webfaction.delete_domain(*positional_args)
|
||||
)
|
||||
|
||||
else:
|
||||
module.fail_json(msg="Unknown state specified: {}".format(domain_state))
|
||||
|
||||
module.exit_json(
|
||||
changed = True,
|
||||
result = result
|
||||
)
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
||||
|
139
cloud/webfaction/webfaction_mailbox.py
Normal file
139
cloud/webfaction/webfaction_mailbox.py
Normal file
|
@ -0,0 +1,139 @@
|
|||
#! /usr/bin/python
|
||||
#
|
||||
# Create webfaction mailbox using Ansible and the Webfaction API
|
||||
#
|
||||
# ------------------------------------------
|
||||
# (c) Quentin Stafford-Fraser and Andy Baker 2015
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: webfaction_mailbox
|
||||
short_description: Add or remove mailboxes on Webfaction
|
||||
description:
|
||||
- Add or remove mailboxes on a Webfaction account. Further documentation at http://github.com/quentinsf/ansible-webfaction.
|
||||
author: Quentin Stafford-Fraser (@quentinsf)
|
||||
version_added: "2.0"
|
||||
notes:
|
||||
- "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
|
||||
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
|
||||
options:
|
||||
|
||||
mailbox_name:
|
||||
description:
|
||||
- The name of the mailbox
|
||||
required: true
|
||||
|
||||
mailbox_password:
|
||||
description:
|
||||
- The password for the mailbox
|
||||
required: true
|
||||
default: null
|
||||
|
||||
state:
|
||||
description:
|
||||
- Whether the mailbox should exist
|
||||
required: false
|
||||
choices: ['present', 'absent']
|
||||
default: "present"
|
||||
|
||||
login_name:
|
||||
description:
|
||||
- The webfaction account to use
|
||||
required: true
|
||||
|
||||
login_password:
|
||||
description:
|
||||
- The webfaction password to use
|
||||
required: true
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a mailbox
|
||||
webfaction_mailbox:
|
||||
mailbox_name="mybox"
|
||||
mailbox_password="myboxpw"
|
||||
state=present
|
||||
login_name={{webfaction_user}}
|
||||
login_password={{webfaction_passwd}}
|
||||
'''
|
||||
|
||||
import socket
|
||||
import xmlrpclib
|
||||
|
||||
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
mailbox_name=dict(required=True),
|
||||
mailbox_password=dict(required=True),
|
||||
state=dict(required=False, choices=['present', 'absent'], default='present'),
|
||||
login_name=dict(required=True),
|
||||
login_password=dict(required=True),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
mailbox_name = module.params['mailbox_name']
|
||||
site_state = module.params['state']
|
||||
|
||||
session_id, account = webfaction.login(
|
||||
module.params['login_name'],
|
||||
module.params['login_password']
|
||||
)
|
||||
|
||||
mailbox_list = webfaction.list_mailboxes(session_id)
|
||||
existing_mailbox = mailbox_name in mailbox_list
|
||||
|
||||
result = {}
|
||||
|
||||
# Here's where the real stuff happens
|
||||
|
||||
if site_state == 'present':
|
||||
|
||||
# Does a mailbox with this name already exist?
|
||||
if existing_mailbox:
|
||||
module.exit_json(changed=False,)
|
||||
|
||||
positional_args = [session_id, mailbox_name]
|
||||
|
||||
if not module.check_mode:
|
||||
# If this isn't a dry run, create the mailbox
|
||||
result.update(webfaction.create_mailbox(*positional_args))
|
||||
|
||||
elif site_state == 'absent':
|
||||
|
||||
# If the mailbox is already not there, nothing changed.
|
||||
if not existing_mailbox:
|
||||
module.exit_json(changed=False)
|
||||
|
||||
if not module.check_mode:
|
||||
# If this isn't a dry run, delete the mailbox
|
||||
result.update(webfaction.delete_mailbox(session_id, mailbox_name))
|
||||
|
||||
else:
|
||||
module.fail_json(msg="Unknown state specified: {}".format(site_state))
|
||||
|
||||
module.exit_json(changed=True, result=result)
|
||||
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
||||
|
208
cloud/webfaction/webfaction_site.py
Normal file
208
cloud/webfaction/webfaction_site.py
Normal file
|
@ -0,0 +1,208 @@
|
|||
#! /usr/bin/python
|
||||
#
|
||||
# Create Webfaction website using Ansible and the Webfaction API
|
||||
#
|
||||
# ------------------------------------------
|
||||
#
|
||||
# (c) Quentin Stafford-Fraser 2015
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: webfaction_site
|
||||
short_description: Add or remove a website on a Webfaction host
|
||||
description:
|
||||
- Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
|
||||
author: Quentin Stafford-Fraser (@quentinsf)
|
||||
version_added: "2.0"
|
||||
notes:
|
||||
- Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name.
|
||||
- If a site of the same name exists in the account but on a different host, the operation will exit.
|
||||
- "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
|
||||
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
|
||||
|
||||
options:
|
||||
|
||||
name:
|
||||
description:
|
||||
- The name of the website
|
||||
required: true
|
||||
|
||||
state:
|
||||
description:
|
||||
- Whether the website should exist
|
||||
required: false
|
||||
choices: ['present', 'absent']
|
||||
default: "present"
|
||||
|
||||
host:
|
||||
description:
|
||||
- The webfaction host on which the site should be created.
|
||||
required: true
|
||||
|
||||
https:
|
||||
description:
|
||||
- Whether or not to use HTTPS
|
||||
required: false
|
||||
choices: BOOLEANS
|
||||
default: 'false'
|
||||
|
||||
site_apps:
|
||||
description:
|
||||
- A mapping of URLs to apps
|
||||
required: false
|
||||
|
||||
subdomains:
|
||||
description:
|
||||
- A list of subdomains associated with this site.
|
||||
required: false
|
||||
default: null
|
||||
|
||||
login_name:
|
||||
description:
|
||||
- The webfaction account to use
|
||||
required: true
|
||||
|
||||
login_password:
|
||||
description:
|
||||
- The webfaction password to use
|
||||
required: true
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: create website
|
||||
webfaction_site:
|
||||
name: testsite1
|
||||
state: present
|
||||
host: myhost.webfaction.com
|
||||
subdomains:
|
||||
- 'testsite1.my_domain.org'
|
||||
site_apps:
|
||||
- ['testapp1', '/']
|
||||
https: no
|
||||
login_name: "{{webfaction_user}}"
|
||||
login_password: "{{webfaction_passwd}}"
|
||||
'''
|
||||
|
||||
import socket
|
||||
import xmlrpclib
|
||||
|
||||
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
name = dict(required=True),
|
||||
state = dict(required=False, choices=['present', 'absent'], default='present'),
|
||||
# You can specify an IP address or hostname.
|
||||
host = dict(required=True),
|
||||
https = dict(required=False, choices=BOOLEANS, default=False),
|
||||
subdomains = dict(required=False, default=[]),
|
||||
site_apps = dict(required=False, default=[]),
|
||||
login_name = dict(required=True),
|
||||
login_password = dict(required=True),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
site_name = module.params['name']
|
||||
site_state = module.params['state']
|
||||
site_host = module.params['host']
|
||||
site_ip = socket.gethostbyname(site_host)
|
||||
|
||||
session_id, account = webfaction.login(
|
||||
module.params['login_name'],
|
||||
module.params['login_password']
|
||||
)
|
||||
|
||||
site_list = webfaction.list_websites(session_id)
|
||||
site_map = dict([(i['name'], i) for i in site_list])
|
||||
existing_site = site_map.get(site_name)
|
||||
|
||||
result = {}
|
||||
|
||||
# Here's where the real stuff happens
|
||||
|
||||
if site_state == 'present':
|
||||
|
||||
# Does a site with this name already exist?
|
||||
if existing_site:
|
||||
|
||||
# If yes, but it's on a different IP address, then fail.
|
||||
# If we wanted to allow relocation, we could add a 'relocate=true' option
|
||||
# which would get the existing IP address, delete the site there, and create it
|
||||
# at the new address. A bit dangerous, perhaps, so for now we'll require manual
|
||||
# deletion if it's on another host.
|
||||
|
||||
if existing_site['ip'] != site_ip:
|
||||
module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
|
||||
|
||||
# If it's on this host and the key parameters are the same, nothing needs to be done.
|
||||
|
||||
if (existing_site['https'] == module.boolean(module.params['https'])) and \
|
||||
(set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
|
||||
(dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
|
||||
module.exit_json(
|
||||
changed = False
|
||||
)
|
||||
|
||||
positional_args = [
|
||||
session_id, site_name, site_ip,
|
||||
module.boolean(module.params['https']),
|
||||
module.params['subdomains'],
|
||||
]
|
||||
for a in module.params['site_apps']:
|
||||
positional_args.append( (a[0], a[1]) )
|
||||
|
||||
if not module.check_mode:
|
||||
# If this isn't a dry run, create or modify the site
|
||||
result.update(
|
||||
webfaction.create_website(
|
||||
*positional_args
|
||||
) if not existing_site else webfaction.update_website (
|
||||
*positional_args
|
||||
)
|
||||
)
|
||||
|
||||
elif site_state == 'absent':
|
||||
|
||||
# If the site's already not there, nothing changed.
|
||||
if not existing_site:
|
||||
module.exit_json(
|
||||
changed = False,
|
||||
)
|
||||
|
||||
if not module.check_mode:
|
||||
# If this isn't a dry run, delete the site
|
||||
result.update(
|
||||
webfaction.delete_website(session_id, site_name, site_ip)
|
||||
)
|
||||
|
||||
else:
|
||||
module.fail_json(msg="Unknown state specified: {}".format(site_state))
|
||||
|
||||
module.exit_json(
|
||||
changed = True,
|
||||
result = result
|
||||
)
|
||||
|
||||
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
||||
|
|
@ -42,7 +42,7 @@ requirements:
|
|||
- python-consul
|
||||
- requests
|
||||
version_added: "2.0"
|
||||
author: '"Steve Gargan (@sgargan)" <steve.gargan@gmail.com>'
|
||||
author: "Steve Gargan (@sgargan)"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
|
|
|
@ -30,7 +30,7 @@ requirements:
|
|||
- pyhcl
|
||||
- requests
|
||||
version_added: "2.0"
|
||||
author: '"Steve Gargan (@sgargan)" <steve.gargan@gmail.com>'
|
||||
author: "Steve Gargan (@sgargan)"
|
||||
options:
|
||||
mgmt_token:
|
||||
description:
|
||||
|
|
|
@ -32,7 +32,7 @@ requirements:
|
|||
- python-consul
|
||||
- requests
|
||||
version_added: "2.0"
|
||||
author: '"Steve Gargan (@sgargan)" <steve.gargan@gmail.com>'
|
||||
author: "Steve Gargan (@sgargan)"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
|
|
|
@ -30,7 +30,7 @@ requirements:
|
|||
- python-consul
|
||||
- requests
|
||||
version_added: "2.0"
|
||||
author: '"Steve Gargan (@sgargan)" <steve.gargan@gmail.com>'
|
||||
author: "Steve Gargan (@sgargan)"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
|
|
0
commands/__init__.py
Normal file
0
commands/__init__.py
Normal file
177
commands/expect.py
Normal file
177
commands/expect.py
Normal file
|
@ -0,0 +1,177 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2015, Matt Martz <matt@sivel.net>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import datetime
|
||||
|
||||
try:
|
||||
import pexpect
|
||||
HAS_PEXPECT = True
|
||||
except ImportError:
|
||||
HAS_PEXPECT = False
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: expect
|
||||
version_added: 2.0
|
||||
short_description: Executes a command and responds to prompts
|
||||
description:
|
||||
- The M(expect) module executes a command and responds to prompts
|
||||
- The given command will be executed on all selected nodes. It will not be
|
||||
processed through the shell, so variables like C($HOME) and operations
|
||||
like C("<"), C(">"), C("|"), and C("&") will not work
|
||||
options:
|
||||
command:
|
||||
description:
|
||||
- the command module takes command to run.
|
||||
required: true
|
||||
creates:
|
||||
description:
|
||||
- a filename, when it already exists, this step will B(not) be run.
|
||||
required: false
|
||||
removes:
|
||||
description:
|
||||
- a filename, when it does not exist, this step will B(not) be run.
|
||||
required: false
|
||||
chdir:
|
||||
description:
|
||||
- cd into this directory before running the command
|
||||
required: false
|
||||
responses:
|
||||
description:
|
||||
- Mapping of expected string and string to respond with
|
||||
required: true
|
||||
timeout:
|
||||
description:
|
||||
- Amount of time in seconds to wait for the expected strings
|
||||
default: 30
|
||||
echo:
|
||||
description:
|
||||
- Whether or not to echo out your response strings
|
||||
default: false
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- pexpect >= 3.3
|
||||
notes:
|
||||
- If you want to run a command through the shell (say you are using C(<),
|
||||
C(>), C(|), etc), you must specify a shell in the command such as
|
||||
C(/bin/bash -c "/path/to/something | grep else")
|
||||
author: "Matt Martz (@sivel)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- expect:
|
||||
command: passwd username
|
||||
responses:
|
||||
(?i)password: "MySekretPa$$word"
|
||||
'''
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
command=dict(required=True),
|
||||
chdir=dict(),
|
||||
creates=dict(),
|
||||
removes=dict(),
|
||||
responses=dict(type='dict', required=True),
|
||||
timeout=dict(type='int', default=30),
|
||||
echo=dict(type='bool', default=False),
|
||||
)
|
||||
)
|
||||
|
||||
if not HAS_PEXPECT:
|
||||
module.fail_json(msg='The pexpect python module is required')
|
||||
|
||||
chdir = module.params['chdir']
|
||||
args = module.params['command']
|
||||
creates = module.params['creates']
|
||||
removes = module.params['removes']
|
||||
responses = module.params['responses']
|
||||
timeout = module.params['timeout']
|
||||
echo = module.params['echo']
|
||||
|
||||
events = dict()
|
||||
for key, value in responses.iteritems():
|
||||
events[key.decode()] = u'%s\n' % value.rstrip('\n').decode()
|
||||
|
||||
if args.strip() == '':
|
||||
module.fail_json(rc=256, msg="no command given")
|
||||
|
||||
if chdir:
|
||||
chdir = os.path.abspath(os.path.expanduser(chdir))
|
||||
os.chdir(chdir)
|
||||
|
||||
if creates:
|
||||
# do not run the command if the line contains creates=filename
|
||||
# and the filename already exists. This allows idempotence
|
||||
# of command executions.
|
||||
v = os.path.expanduser(creates)
|
||||
if os.path.exists(v):
|
||||
module.exit_json(
|
||||
cmd=args,
|
||||
stdout="skipped, since %s exists" % v,
|
||||
changed=False,
|
||||
stderr=False,
|
||||
rc=0
|
||||
)
|
||||
|
||||
if removes:
|
||||
# do not run the command if the line contains removes=filename
|
||||
# and the filename does not exist. This allows idempotence
|
||||
# of command executions.
|
||||
v = os.path.expanduser(removes)
|
||||
if not os.path.exists(v):
|
||||
module.exit_json(
|
||||
cmd=args,
|
||||
stdout="skipped, since %s does not exist" % v,
|
||||
changed=False,
|
||||
stderr=False,
|
||||
rc=0
|
||||
)
|
||||
|
||||
startd = datetime.datetime.now()
|
||||
|
||||
try:
|
||||
out, rc = pexpect.runu(args, timeout=timeout, withexitstatus=True,
|
||||
events=events, cwd=chdir, echo=echo)
|
||||
except pexpect.ExceptionPexpect, e:
|
||||
module.fail_json(msg='%s' % e)
|
||||
|
||||
endd = datetime.datetime.now()
|
||||
delta = endd - startd
|
||||
|
||||
if out is None:
|
||||
out = ''
|
||||
|
||||
module.exit_json(
|
||||
cmd=args,
|
||||
stdout=out.rstrip('\r\n'),
|
||||
rc=rc,
|
||||
start=str(startd),
|
||||
end=str(endd),
|
||||
delta=str(delta),
|
||||
changed=True,
|
||||
)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
||||
main()
|
|
@ -99,7 +99,7 @@ notes:
|
|||
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
|
||||
can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
|
||||
requirements: [ "pymongo" ]
|
||||
author: '"Elliott Foster (@elliotttf)" <elliott@fourkitchens.com>'
|
||||
author: "Elliott Foster (@elliotttf)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
|
|
@ -27,8 +27,8 @@ description:
|
|||
the status of the cluster.
|
||||
version_added: "1.2"
|
||||
author:
|
||||
- '"James Martin (@jsmartin)" <jmartin@ansible.com>'
|
||||
- '"Drew Kerrigan (@drewkerrigan)" <dkerrigan@basho.com>'
|
||||
- "James Martin (@jsmartin)"
|
||||
- "Drew Kerrigan (@drewkerrigan)"
|
||||
options:
|
||||
command:
|
||||
description:
|
||||
|
|
|
@ -30,7 +30,7 @@ short_description: Manage MySQL replication
|
|||
description:
|
||||
- Manages MySQL server replication, slave, master status get and change master host.
|
||||
version_added: "1.3"
|
||||
author: '"Balazs Pocze (@banyek)" <banyek@gawker.com>'
|
||||
author: "Balazs Pocze (@banyek)"
|
||||
options:
|
||||
mode:
|
||||
description:
|
||||
|
|
|
@ -23,8 +23,8 @@ DOCUMENTATION = '''
|
|||
---
|
||||
module: patch
|
||||
author:
|
||||
- '"Jakub Jirutka (@jirutka)" <jakub@jirutka.cz>'
|
||||
- '"Luis Alberto Perez Lazaro (@luisperlaz)" <luisperlazaro@gmail.com>'
|
||||
- "Jakub Jirutka (@jirutka)"
|
||||
- "Luis Alberto Perez Lazaro (@luisperlaz)"
|
||||
version_added: 1.9
|
||||
description:
|
||||
- Apply patch files using the GNU patch tool.
|
||||
|
@ -65,6 +65,14 @@ options:
|
|||
required: false
|
||||
type: "int"
|
||||
default: "0"
|
||||
backup:
|
||||
version_added: "2.0"
|
||||
description:
|
||||
- passes --backup --version-control=numbered to patch,
|
||||
producing numbered backup copies
|
||||
required: false
|
||||
type: "bool"
|
||||
default: "False"
|
||||
note:
|
||||
- This module requires GNU I(patch) utility to be installed on the remote host.
|
||||
'''
|
||||
|
@ -101,7 +109,7 @@ def is_already_applied(patch_func, patch_file, basedir, dest_file=None, strip=0)
|
|||
return rc == 0
|
||||
|
||||
|
||||
def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_run=False):
|
||||
def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_run=False, backup=False):
|
||||
opts = ['--quiet', '--forward', '--batch', '--reject-file=-',
|
||||
"--strip=%s" % strip, "--directory='%s'" % basedir,
|
||||
"--input='%s'" % patch_file]
|
||||
|
@ -109,6 +117,8 @@ def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_ru
|
|||
opts.append('--dry-run')
|
||||
if dest_file:
|
||||
opts.append("'%s'" % dest_file)
|
||||
if backup:
|
||||
opts.append('--backup --version-control=numbered')
|
||||
|
||||
(rc, out, err) = patch_func(opts)
|
||||
if rc != 0:
|
||||
|
@ -124,6 +134,9 @@ def main():
|
|||
'basedir': {},
|
||||
'strip': {'default': 0, 'type': 'int'},
|
||||
'remote_src': {'default': False, 'type': 'bool'},
|
||||
# NB: for 'backup' parameter, semantics is slightly different from standard
|
||||
# since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~")
|
||||
'backup': { 'default': False, 'type': 'bool' }
|
||||
},
|
||||
required_one_of=[['dest', 'basedir']],
|
||||
supports_check_mode=True
|
||||
|
@ -156,8 +169,8 @@ def main():
|
|||
changed = False
|
||||
if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip):
|
||||
try:
|
||||
apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip,
|
||||
dry_run=module.check_mode)
|
||||
apply_patch( patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip,
|
||||
dry_run=module.check_mode, backup=p.backup )
|
||||
changed = True
|
||||
except PatchError, e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rabbitmq_binding
|
||||
author: '"Manuel Sousa (@manuel-sousa)" <manuel.sousa@gmail.com>'
|
||||
author: "Manuel Sousa (@manuel-sousa)"
|
||||
version_added: "2.0"
|
||||
|
||||
short_description: This module manages rabbitMQ bindings
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rabbitmq_exchange
|
||||
author: '"Manuel Sousa (@manuel-sousa)" <manuel.sousa@gmail.com>'
|
||||
author: "Manuel Sousa (@manuel-sousa)"
|
||||
version_added: "2.0"
|
||||
|
||||
short_description: This module manages rabbitMQ exchanges
|
||||
|
|
|
@ -26,7 +26,7 @@ short_description: Manage the state of policies in RabbitMQ.
|
|||
description:
|
||||
- Manage the state of a virtual host in RabbitMQ.
|
||||
version_added: "1.5"
|
||||
author: '"John Dewey (@retr0h)" <john@dewey.ws>'
|
||||
author: "John Dewey (@retr0h)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rabbitmq_queue
|
||||
author: '"Manuel Sousa (@manuel-sousa)" <manuel.sousa@gmail.com>'
|
||||
author: "Manuel Sousa (@manuel-sousa)"
|
||||
version_added: "2.0"
|
||||
|
||||
short_description: This module manages rabbitMQ queues
|
||||
|
|
|
@ -22,7 +22,7 @@ DOCUMENTATION = '''
|
|||
---
|
||||
module: airbrake_deployment
|
||||
version_added: "1.2"
|
||||
author: '"Bruce Pennypacker (@bpennypacker)" <bruce@pennypacker.org>'
|
||||
author: "Bruce Pennypacker (@bpennypacker)"
|
||||
short_description: Notify airbrake about app deployments
|
||||
description:
|
||||
- Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking)
|
||||
|
@ -61,8 +61,7 @@ options:
|
|||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
|
||||
# informational: requirements for nodes
|
||||
requirements: [ urllib, urllib2 ]
|
||||
requirements: []
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -72,6 +71,8 @@ EXAMPLES = '''
|
|||
revision=4.2
|
||||
'''
|
||||
|
||||
import urllib
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
|
|
@ -34,7 +34,7 @@ short_description: Manage boundary meters
|
|||
description:
|
||||
- This module manages boundary meters
|
||||
version_added: "1.3"
|
||||
author: '"curtis (@ccollicutt)" <curtis@serverascode.com>'
|
||||
author: "curtis (@ccollicutt)"
|
||||
requirements:
|
||||
- Boundary API access
|
||||
- bprobe is required to send data, but not to register a meter
|
||||
|
|
|
@ -14,7 +14,7 @@ description:
|
|||
- "Allows to post events to DataDog (www.datadoghq.com) service."
|
||||
- "Uses http://docs.datadoghq.com/api/#events API."
|
||||
version_added: "1.3"
|
||||
author: '"Artūras `arturaz` Šlajus (@arturaz)" <x11@arturaz.net>'
|
||||
author: "Artūras `arturaz` Šlajus (@arturaz)"
|
||||
notes: []
|
||||
requirements: [urllib2]
|
||||
options:
|
||||
|
@ -71,7 +71,7 @@ datadog_event: title="Testing from ansible" text="Test!" priority="low"
|
|||
# Post an event with several tags
|
||||
datadog_event: title="Testing from ansible" text="Test!"
|
||||
api_key="6873258723457823548234234234"
|
||||
tags=aa,bb,cc
|
||||
tags=aa,bb,#host:{{ inventory_hostname }}
|
||||
'''
|
||||
|
||||
import socket
|
||||
|
@ -86,7 +86,7 @@ def main():
|
|||
priority=dict(
|
||||
required=False, default='normal', choices=['normal', 'low']
|
||||
),
|
||||
tags=dict(required=False, default=None),
|
||||
tags=dict(required=False, default=None, type='list'),
|
||||
alert_type=dict(
|
||||
required=False, default='info',
|
||||
choices=['error', 'warning', 'info', 'success']
|
||||
|
@ -116,7 +116,7 @@ def post_event(module):
|
|||
if module.params['date_happened'] != None:
|
||||
body['date_happened'] = module.params['date_happened']
|
||||
if module.params['tags'] != None:
|
||||
body['tags'] = module.params['tags'].split(",")
|
||||
body['tags'] = module.params['tags']
|
||||
if module.params['aggregation_key'] != None:
|
||||
body['aggregation_key'] = module.params['aggregation_key']
|
||||
if module.params['source_type_name'] != None:
|
||||
|
|
283
monitoring/datadog_monitor.py
Normal file
283
monitoring/datadog_monitor.py
Normal file
|
@ -0,0 +1,283 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
# import module snippets
|
||||
|
||||
# Import Datadog
|
||||
try:
|
||||
from datadog import initialize, api
|
||||
HAS_DATADOG = True
|
||||
except:
|
||||
HAS_DATADOG = False
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: datadog_monitor
|
||||
short_description: Manages Datadog monitors
|
||||
description:
|
||||
- "Manages monitors within Datadog"
|
||||
- "Options like described on http://docs.datadoghq.com/api/"
|
||||
version_added: "2.0"
|
||||
author: "Sebastian Kornehl (@skornehl)"
|
||||
notes: []
|
||||
requirements: [datadog]
|
||||
options:
|
||||
api_key:
|
||||
description: ["Your DataDog API key."]
|
||||
required: true
|
||||
app_key:
|
||||
description: ["Your DataDog app key."]
|
||||
required: true
|
||||
state:
|
||||
description: ["The designated state of the monitor."]
|
||||
required: true
|
||||
choices: ['present', 'absent', 'muted', 'unmuted']
|
||||
type:
|
||||
description: ["The type of the monitor."]
|
||||
required: false
|
||||
default: null
|
||||
choices: ['metric alert', 'service check']
|
||||
query:
|
||||
description: ["he monitor query to notify on with syntax varying depending on what type of monitor you are creating."]
|
||||
required: false
|
||||
default: null
|
||||
name:
|
||||
description: ["The name of the alert."]
|
||||
required: true
|
||||
message:
|
||||
description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events."]
|
||||
required: false
|
||||
default: null
|
||||
silenced:
|
||||
description: ["Dictionary of scopes to timestamps or None. Each scope will be muted until the given POSIX timestamp or forever if the value is None. "]
|
||||
required: false
|
||||
default: ""
|
||||
notify_no_data:
|
||||
description: ["A boolean indicating whether this monitor will notify when data stops reporting.."]
|
||||
required: false
|
||||
default: False
|
||||
no_data_timeframe:
|
||||
description: ["The number of minutes before a monitor will notify when data stops reporting. Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks."]
|
||||
required: false
|
||||
default: 2x timeframe for metric, 2 minutes for service
|
||||
timeout_h:
|
||||
description: ["The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state."]
|
||||
required: false
|
||||
default: null
|
||||
renotify_interval:
|
||||
description: ["The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's not resolved."]
|
||||
required: false
|
||||
default: null
|
||||
escalation_message:
|
||||
description: ["A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval is None"]
|
||||
required: false
|
||||
default: null
|
||||
notify_audit:
|
||||
description: ["A boolean indicating whether tagged users will be notified on changes to this monitor."]
|
||||
required: false
|
||||
default: False
|
||||
thresholds:
|
||||
description: ["A dictionary of thresholds by status. Because service checks can have multiple thresholds, we don't define them directly in the query."]
|
||||
required: false
|
||||
default: {'ok': 1, 'critical': 1, 'warning': 1}
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create a metric monitor
|
||||
datadog_monitor:
|
||||
type: "metric alert"
|
||||
name: "Test monitor"
|
||||
state: "present"
|
||||
query: "datadog.agent.up".over("host:host1").last(2).count_by_status()"
|
||||
message: "Some message."
|
||||
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
|
||||
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
|
||||
|
||||
# Deletes a monitor
|
||||
datadog_monitor:
|
||||
name: "Test monitor"
|
||||
state: "absent"
|
||||
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
|
||||
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
|
||||
|
||||
# Mutes a monitor
|
||||
datadog_monitor:
|
||||
name: "Test monitor"
|
||||
state: "mute"
|
||||
silenced: '{"*":None}'
|
||||
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
|
||||
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
|
||||
|
||||
# Unmutes a monitor
|
||||
datadog_monitor:
|
||||
name: "Test monitor"
|
||||
state: "unmute"
|
||||
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
|
||||
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
|
||||
'''
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
api_key=dict(required=True),
|
||||
app_key=dict(required=True),
|
||||
state=dict(required=True, choises=['present', 'absent', 'mute', 'unmute']),
|
||||
type=dict(required=False, choises=['metric alert', 'service check']),
|
||||
name=dict(required=True),
|
||||
query=dict(required=False),
|
||||
message=dict(required=False, default=None),
|
||||
silenced=dict(required=False, default=None, type='dict'),
|
||||
notify_no_data=dict(required=False, default=False, choices=BOOLEANS),
|
||||
no_data_timeframe=dict(required=False, default=None),
|
||||
timeout_h=dict(required=False, default=None),
|
||||
renotify_interval=dict(required=False, default=None),
|
||||
escalation_message=dict(required=False, default=None),
|
||||
notify_audit=dict(required=False, default=False, choices=BOOLEANS),
|
||||
thresholds=dict(required=False, type='dict', default={'ok': 1, 'critical': 1, 'warning': 1}),
|
||||
)
|
||||
)
|
||||
|
||||
# Prepare Datadog
|
||||
if not HAS_DATADOG:
|
||||
module.fail_json(msg='datadogpy required for this module')
|
||||
|
||||
options = {
|
||||
'api_key': module.params['api_key'],
|
||||
'app_key': module.params['app_key']
|
||||
}
|
||||
|
||||
initialize(**options)
|
||||
|
||||
if module.params['state'] == 'present':
|
||||
install_monitor(module)
|
||||
elif module.params['state'] == 'absent':
|
||||
delete_monitor(module)
|
||||
elif module.params['state'] == 'mute':
|
||||
mute_monitor(module)
|
||||
elif module.params['state'] == 'unmute':
|
||||
unmute_monitor(module)
|
||||
|
||||
|
||||
def _get_monitor(module):
|
||||
for monitor in api.Monitor.get_all():
|
||||
if monitor['name'] == module.params['name']:
|
||||
return monitor
|
||||
return {}
|
||||
|
||||
|
||||
def _post_monitor(module, options):
|
||||
try:
|
||||
msg = api.Monitor.create(type=module.params['type'], query=module.params['query'],
|
||||
name=module.params['name'], message=module.params['message'],
|
||||
options=options)
|
||||
if 'errors' in msg:
|
||||
module.fail_json(msg=str(msg['errors']))
|
||||
else:
|
||||
module.exit_json(changed=True, msg=msg)
|
||||
except Exception, e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
def _equal_dicts(a, b, ignore_keys):
|
||||
ka = set(a).difference(ignore_keys)
|
||||
kb = set(b).difference(ignore_keys)
|
||||
return ka == kb and all(a[k] == b[k] for k in ka)
|
||||
|
||||
def _update_monitor(module, monitor, options):
|
||||
try:
|
||||
msg = api.Monitor.update(id=monitor['id'], query=module.params['query'],
|
||||
name=module.params['name'], message=module.params['message'],
|
||||
options=options)
|
||||
if 'errors' in msg:
|
||||
module.fail_json(msg=str(msg['errors']))
|
||||
elif _equal_dicts(msg, monitor, ['creator', 'overall_state']):
|
||||
module.exit_json(changed=False, msg=msg)
|
||||
else:
|
||||
module.exit_json(changed=True, msg=msg)
|
||||
except Exception, e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def install_monitor(module):
|
||||
options = {
|
||||
"silenced": module.params['silenced'],
|
||||
"notify_no_data": module.boolean(module.params['notify_no_data']),
|
||||
"no_data_timeframe": module.params['no_data_timeframe'],
|
||||
"timeout_h": module.params['timeout_h'],
|
||||
"renotify_interval": module.params['renotify_interval'],
|
||||
"escalation_message": module.params['escalation_message'],
|
||||
"notify_audit": module.boolean(module.params['notify_audit']),
|
||||
}
|
||||
|
||||
if module.params['type'] == "service check":
|
||||
options["thresholds"] = module.params['thresholds']
|
||||
|
||||
monitor = _get_monitor(module)
|
||||
if not monitor:
|
||||
_post_monitor(module, options)
|
||||
else:
|
||||
_update_monitor(module, monitor, options)
|
||||
|
||||
|
||||
def delete_monitor(module):
|
||||
monitor = _get_monitor(module)
|
||||
if not monitor:
|
||||
module.exit_json(changed=False)
|
||||
try:
|
||||
msg = api.Monitor.delete(monitor['id'])
|
||||
module.exit_json(changed=True, msg=msg)
|
||||
except Exception, e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def mute_monitor(module):
|
||||
monitor = _get_monitor(module)
|
||||
if not monitor:
|
||||
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
|
||||
elif monitor['options']['silenced']:
|
||||
module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
|
||||
elif (module.params['silenced'] is not None
|
||||
and len(set(monitor['options']['silenced']) - set(module.params['silenced'])) == 0):
|
||||
module.exit_json(changed=False)
|
||||
try:
|
||||
if module.params['silenced'] is None or module.params['silenced'] == "":
|
||||
msg = api.Monitor.mute(id=monitor['id'])
|
||||
else:
|
||||
msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
|
||||
module.exit_json(changed=True, msg=msg)
|
||||
except Exception, e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def unmute_monitor(module):
|
||||
monitor = _get_monitor(module)
|
||||
if not monitor:
|
||||
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
|
||||
elif not monitor['options']['silenced']:
|
||||
module.exit_json(changed=False)
|
||||
try:
|
||||
msg = api.Monitor.unmute(monitor['id'])
|
||||
module.exit_json(changed=True, msg=msg)
|
||||
except Exception, e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
main()
|
|
@ -31,7 +31,6 @@ description:
|
|||
version_added: "1.6"
|
||||
author: "Seth Edwards (@sedward)"
|
||||
requirements:
|
||||
- urllib2
|
||||
- base64
|
||||
options:
|
||||
user:
|
||||
|
@ -107,11 +106,7 @@ EXAMPLES = '''
|
|||
'''
|
||||
|
||||
|
||||
try:
|
||||
import urllib2
|
||||
HAS_URLLIB2 = True
|
||||
except ImportError:
|
||||
HAS_URLLIB2 = False
|
||||
import urllib2
|
||||
|
||||
def post_annotation(module):
|
||||
user = module.params['user']
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
DOCUMENTATION = '''
|
||||
---
|
||||
module: logentries
|
||||
author: '"Ivan Vanderbyl (@ivanvanderbyl)" <ivan@app.io>'
|
||||
author: "Ivan Vanderbyl (@ivanvanderbyl)"
|
||||
short_description: Module for tracking logs via logentries.com
|
||||
description:
|
||||
- Sends logs to LogEntries in realtime
|
||||
|
|
|
@ -39,7 +39,7 @@ options:
|
|||
default: null
|
||||
choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
|
||||
requirements: [ ]
|
||||
author: '"Darryl Stoflet (@dstoflet)" <stoflet@gmail.com>'
|
||||
author: "Darryl Stoflet (@dstoflet)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -77,7 +77,7 @@ def main():
|
|||
# Process 'name' Running - restart pending
|
||||
parts = line.split()
|
||||
if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name:
|
||||
return ' '.join(parts[2:])
|
||||
return ' '.join(parts[2:]).lower()
|
||||
else:
|
||||
return ''
|
||||
|
||||
|
@ -86,7 +86,8 @@ def main():
        module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True)
        return status()

    present = status() != ''
    process_status = status()
    present = process_status != ''

    if not present and not state == 'present':
        module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state)

@ -102,7 +103,7 @@ def main():
            module.exit_json(changed=True, name=name, state=state)
        module.exit_json(changed=False, name=name, state=state)

    running = 'running' in status()
    running = 'running' in process_status

    if running and state in ['started', 'monitored']:
        module.exit_json(changed=False, name=name, state=state)

@ -119,7 +120,7 @@ def main():
        if module.check_mode:
            module.exit_json(changed=True)
        status = run_command('unmonitor')
        if status in ['not monitored']:
        if status in ['not monitored'] or 'unmonitor pending' in status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not unmonitored' % name, status=status)

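The monit hunks above capture the parsed status once as process_status instead of shelling out repeatedly, and lower-case the status text. The parsing itself reads as a small standalone function:

# Standalone version of the status parsing shown above: given one line of
# `monit summary` output, return the lower-cased status text for `name`.
def parse_monit_status(line, name):
    parts = line.split()
    if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name:
        return ' '.join(parts[2:]).lower()
    return ''

print(parse_monit_status("Process 'nginx' Running - restart pending", 'nginx'))
# -> 'running - restart pending'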
@ -77,7 +77,7 @@ options:
    version_added: "2.0"
    description:
      - the Servicegroup we want to set downtimes/alerts for.
        B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime).
        B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime).
  command:
    description:
      - The raw command to send to nagios, which

@ -86,7 +86,7 @@ options:
    required: true
    default: null

author: '"Tim Bielawa (@tbielawa)" <tbielawa@redhat.com>'
author: "Tim Bielawa (@tbielawa)"
requirements: [ "Nagios" ]
'''

@ -22,7 +22,7 @@ DOCUMENTATION = '''
---
module: newrelic_deployment
version_added: "1.2"
author: '"Matt Coddington (@mcodd)" <coddington@gmail.com>'
author: "Matt Coddington (@mcodd)"
short_description: Notify newrelic about app deployments
description:
    - Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api)

@ -72,8 +72,7 @@ options:
    choices: ['yes', 'no']
    version_added: 1.5.1

# informational: requirements for nodes
requirements: [ urllib, urllib2 ]
requirements: []
'''

EXAMPLES = '''

@ -83,6 +82,8 @@ EXAMPLES = '''
    revision=1.0
'''

import urllib

# ===========================================
# Module execution.
#

@ -22,7 +22,7 @@ DOCUMENTATION = '''
---
module: rollbar_deployment
version_added: 1.6
author: '"Max Riveiro (@kavu)" <kavu13@gmail.com>'
author: "Max Riveiro (@kavu)"
short_description: Notify Rollbar about app deployments
description:
    - Notify Rollbar about app deployments

@ -76,6 +76,7 @@ EXAMPLES = '''
    comment='Test Deploy'
'''

import urllib

def main():

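Both deployment-notification hunks above simply add a plain `import urllib`; the modules use it to form-encode the POST body sent to the New Relic and Rollbar APIs. A rough sketch of that step, with placeholder field names rather than the exact API parameters:

try:
    from urllib import urlencode        # Python 2, as used by these modules
except ImportError:
    from urllib.parse import urlencode  # Python 3 fallback

# Placeholder fields; the real names come from the respective deployment APIs.
params = {'environment': 'production', 'revision': '1.0', 'comment': 'Test Deploy'}
print(urlencode(params))  # e.g. environment=production&revision=1.0&comment=Test+Deploy (order may vary)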
336
monitoring/sensu_check.py
Normal file
@ -0,0 +1,336 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2014, Anders Ingemann <aim@secoya.dk>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: sensu_check
|
||||
short_description: Manage Sensu checks
|
||||
version_added: 2.0
|
||||
description:
|
||||
- Manage the checks that should be run on a machine by I(Sensu).
|
||||
- Most options do not have a default and will not be added to the check definition unless specified.
|
||||
- All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module,
|
||||
- they are simply specified for your convenience.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the check
|
||||
- This is the key that is used to determine whether a check exists
|
||||
required: true
|
||||
state:
|
||||
description: Whether the check should be present or not
|
||||
choices: [ 'present', 'absent' ]
|
||||
required: false
|
||||
default: present
|
||||
path:
|
||||
description:
|
||||
- Path to the json file of the check to be added/removed.
|
||||
- Will be created if it does not exist (unless I(state=absent)).
|
||||
- The parent folders need to exist when I(state=present), otherwise an error will be thrown
|
||||
required: false
|
||||
default: /etc/sensu/conf.d/checks.json
|
||||
backup:
|
||||
description:
|
||||
- Create a backup file (if yes), including the timestamp information so
|
||||
- you can get the original file back if you somehow clobbered it incorrectly.
|
||||
choices: [ 'yes', 'no' ]
|
||||
required: false
|
||||
default: no
|
||||
command:
|
||||
description:
|
||||
- Path to the sensu check to run (not required when I(state=absent))
|
||||
required: true
|
||||
handlers:
|
||||
description:
|
||||
- List of handlers to notify when the check fails
|
||||
required: false
|
||||
default: []
|
||||
subscribers:
|
||||
description:
|
||||
- List of subscribers/channels this check should run for
|
||||
- See sensu_subscribers to subscribe a machine to a channel
|
||||
required: false
|
||||
default: []
|
||||
interval:
|
||||
description:
|
||||
- Check interval in seconds
|
||||
required: false
|
||||
default: null
|
||||
timeout:
|
||||
description:
|
||||
- Timeout for the check
|
||||
required: false
|
||||
default: 10
|
||||
handle:
|
||||
description:
|
||||
- Whether the check should be handled or not
|
||||
choices: [ 'yes', 'no' ]
|
||||
required: false
|
||||
default: yes
|
||||
subdue_begin:
|
||||
description:
|
||||
- When to disable handling of check failures
|
||||
required: false
|
||||
default: null
|
||||
subdue_end:
|
||||
description:
|
||||
- When to enable handling of check failures
|
||||
required: false
|
||||
default: null
|
||||
dependencies:
|
||||
description:
|
||||
- Other checks this check depends on, if dependencies fail,
|
||||
- handling of this check will be disabled
|
||||
required: false
|
||||
default: []
|
||||
metric:
|
||||
description: Whether the check is a metric
|
||||
choices: [ 'yes', 'no' ]
|
||||
required: false
|
||||
default: no
|
||||
standalone:
|
||||
description:
|
||||
- Whether the check should be scheduled by the sensu client or server
|
||||
- This option obviates the need for specifying the I(subscribers) option
|
||||
choices: [ 'yes', 'no' ]
|
||||
required: false
|
||||
default: no
|
||||
publish:
|
||||
description:
|
||||
- Whether the check should be scheduled at all.
|
||||
- You can still issue it via the sensu api
|
||||
choices: [ 'yes', 'no' ]
|
||||
required: false
|
||||
default: yes
|
||||
occurrences:
|
||||
description:
|
||||
- Number of event occurrences before the handler should take action
|
||||
required: false
|
||||
default: 1
|
||||
refresh:
|
||||
description:
|
||||
- Number of seconds handlers should wait before taking second action
|
||||
required: false
|
||||
default: null
|
||||
aggregate:
|
||||
description:
|
||||
- Classifies the check as an aggregate check,
|
||||
- making it available via the aggregate API
|
||||
choices: [ 'yes', 'no' ]
|
||||
required: false
|
||||
default: no
|
||||
low_flap_threshold:
|
||||
description:
|
||||
- The low threshold for flap detection
|
||||
required: false
|
||||
default: null
|
||||
high_flap_threshold:
|
||||
description:
|
||||
- The high threshold for flap detection
|
||||
required: false
|
||||
default: null
|
||||
requirements: [ ]
|
||||
author: Anders Ingemann
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Fetch metrics about the CPU load every 60 seconds,
|
||||
# the sensu server has a handler called 'relay' which forwards stats to graphite
|
||||
- name: get cpu metrics
|
||||
sensu_check: name=cpu_load
|
||||
command=/etc/sensu/plugins/system/cpu-mpstat-metrics.rb
|
||||
metric=yes handlers=relay subscribers=common interval=60
|
||||
|
||||
# Check whether nginx is running
|
||||
- name: check nginx process
|
||||
sensu_check: name=nginx_running
|
||||
command='/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid'
|
||||
handlers=default subscribers=nginx interval=60
|
||||
|
||||
# Stop monitoring the disk capacity.
|
||||
# Note that the check will still show up in the sensu dashboard,
|
||||
# to remove it completely you need to issue a DELETE request to the sensu api.
|
||||
- name: check disk
|
||||
sensu_check: name=check_disk_capacity
|
||||
'''
|
||||
|
||||
|
||||
def sensu_check(module, path, name, state='present', backup=False):
|
||||
changed = False
|
||||
reasons = []
|
||||
|
||||
try:
|
||||
import json
|
||||
except ImportError:
|
||||
import simplejson as json
|
||||
|
||||
try:
|
||||
try:
|
||||
stream = open(path, 'r')
|
||||
config = json.load(stream)
|
||||
except IOError, e:
|
||||
if e.errno == 2:  # File not found, non-fatal
|
||||
if state == 'absent':
|
||||
reasons.append('file did not exist and state is `absent\'')
|
||||
return changed, reasons
|
||||
config = {}
|
||||
else:
|
||||
module.fail_json(msg=str(e))
|
||||
except ValueError:
|
||||
msg = '{path} contains invalid JSON'.format(path=path)
|
||||
module.fail_json(msg=msg)
|
||||
finally:
|
||||
if stream:
|
||||
stream.close()
|
||||
|
||||
if 'checks' not in config:
|
||||
if state == 'absent':
|
||||
reasons.append('`checks\' section did not exist and state is `absent\'')
|
||||
return changed, reasons
|
||||
config['checks'] = {}
|
||||
changed = True
|
||||
reasons.append('`checks\' section did not exist')
|
||||
|
||||
if state == 'absent':
|
||||
if name in config['checks']:
|
||||
del config['checks'][name]
|
||||
changed = True
|
||||
reasons.append('check was present and state is `absent\'')
|
||||
|
||||
if state == 'present':
|
||||
if name not in config['checks']:
|
||||
check = {}
|
||||
config['checks'][name] = check
|
||||
changed = True
|
||||
reasons.append('check was absent and state is `present\'')
|
||||
else:
|
||||
check = config['checks'][name]
|
||||
simple_opts = ['command',
|
||||
'handlers',
|
||||
'subscribers',
|
||||
'interval',
|
||||
'timeout',
|
||||
'handle',
|
||||
'dependencies',
|
||||
'standalone',
|
||||
'publish',
|
||||
'occurrences',
|
||||
'refresh',
|
||||
'aggregate',
|
||||
'low_flap_threshold',
|
||||
'high_flap_threshold',
|
||||
]
|
||||
for opt in simple_opts:
|
||||
if module.params[opt] is not None:
|
||||
if opt not in check or check[opt] != module.params[opt]:
|
||||
check[opt] = module.params[opt]
|
||||
changed = True
|
||||
reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
|
||||
else:
|
||||
if opt in check:
|
||||
del check[opt]
|
||||
changed = True
|
||||
reasons.append('`{opt}\' was removed'.format(opt=opt))
|
||||
|
||||
if module.params['metric']:
|
||||
if 'type' not in check or check['type'] != 'metric':
|
||||
check['type'] = 'metric'
|
||||
changed = True
|
||||
reasons.append('`type\' was not defined or not `metric\'')
|
||||
if not module.params['metric'] and 'type' in check:
|
||||
del check['type']
|
||||
changed = True
|
||||
reasons.append('`type\' was defined')
|
||||
|
||||
if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
|
||||
subdue = {'begin': module.params['subdue_begin'],
|
||||
'end': module.params['subdue_end'],
|
||||
}
|
||||
if 'subdue' not in check or check['subdue'] != subdue:
|
||||
check['subdue'] = subdue
|
||||
changed = True
|
||||
reasons.append('`subdue\' did not exist or was different')
|
||||
else:
|
||||
if 'subdue' in check:
|
||||
del check['subdue']
|
||||
changed = True
|
||||
reasons.append('`subdue\' was removed')
|
||||
|
||||
if changed and not module.check_mode:
|
||||
if backup:
|
||||
module.backup_local(path)
|
||||
try:
|
||||
try:
|
||||
stream = open(path, 'w')
|
||||
stream.write(json.dumps(config, indent=2) + '\n')
|
||||
except IOError, e:
|
||||
module.fail_json(msg=str(e))
|
||||
finally:
|
||||
if stream:
|
||||
stream.close()
|
||||
|
||||
return changed, reasons
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
arg_spec = {'name': {'type': 'str', 'required': True},
|
||||
'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
|
||||
'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
|
||||
'backup': {'type': 'bool', 'default': 'no'},
|
||||
'command': {'type': 'str'},
|
||||
'handlers': {'type': 'list'},
|
||||
'subscribers': {'type': 'list'},
|
||||
'interval': {'type': 'int'},
|
||||
'timeout': {'type': 'int'},
|
||||
'handle': {'type': 'bool'},
|
||||
'subdue_begin': {'type': 'str'},
|
||||
'subdue_end': {'type': 'str'},
|
||||
'dependencies': {'type': 'list'},
|
||||
'metric': {'type': 'bool', 'default': 'no'},
|
||||
'standalone': {'type': 'bool'},
|
||||
'publish': {'type': 'bool'},
|
||||
'occurrences': {'type': 'int'},
|
||||
'refresh': {'type': 'int'},
|
||||
'aggregate': {'type': 'bool'},
|
||||
'low_flap_threshold': {'type': 'int'},
|
||||
'high_flap_threshold': {'type': 'int'},
|
||||
}
|
||||
|
||||
required_together = [['subdue_begin', 'subdue_end']]
|
||||
|
||||
module = AnsibleModule(argument_spec=arg_spec,
|
||||
required_together=required_together,
|
||||
supports_check_mode=True)
|
||||
if module.params['state'] != 'absent' and module.params['command'] is None:
|
||||
module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))
|
||||
|
||||
path = module.params['path']
|
||||
name = module.params['name']
|
||||
state = module.params['state']
|
||||
backup = module.params['backup']
|
||||
|
||||
changed, reasons = sensu_check(module, path, name, state, backup)
|
||||
|
||||
module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
|
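At its core sensu_check does a read-modify-write of a JSON file, comparing every option before setting it so changed is only reported when something actually differs. A condensed sketch of that pattern with made-up check data (the real module also records human-readable reasons, honors check mode and can back up the file):

import json

# Minimal upsert: returns True only when the stored check definition changed.
def upsert_check(config, name, **opts):
    changed = False
    check = config.setdefault('checks', {}).setdefault(name, {})
    for key, value in opts.items():
        if check.get(key) != value:
            check[key] = value
            changed = True
    return changed

config = {}
print(upsert_check(config, 'nginx_running', command='check-procs.rb', interval=60))  # True
print(upsert_check(config, 'nginx_running', command='check-procs.rb', interval=60))  # False
print(json.dumps(config, indent=2))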
@ -79,6 +79,10 @@ options:
|
|||
description:
|
||||
- The timeout of API request (seconds).
|
||||
default: 10
|
||||
proxy:
|
||||
description:
|
||||
- The name of the Zabbix Proxy to be used
|
||||
default: None
|
||||
interfaces:
|
||||
description:
|
||||
- List of interfaces to be created for the host (see example below).
|
||||
|
@ -118,6 +122,7 @@ EXAMPLES = '''
|
|||
ip: 10.xx.xx.xx
|
||||
dns: ""
|
||||
port: 12345
|
||||
proxy: a.zabbix.proxy
|
||||
'''
|
||||
|
||||
import logging
|
||||
|
@ -174,21 +179,25 @@ class Host(object):
|
|||
template_ids.append(template_id)
|
||||
return template_ids
|
||||
|
||||
def add_host(self, host_name, group_ids, status, interfaces):
|
||||
def add_host(self, host_name, group_ids, status, interfaces, proxy_id):
|
||||
try:
|
||||
if self._module.check_mode:
|
||||
self._module.exit_json(changed=True)
|
||||
host_list = self._zapi.host.create({'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status})
|
||||
parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}
|
||||
if proxy_id:
|
||||
parameters['proxy_hostid'] = proxy_id
|
||||
host_list = self._zapi.host.create(parameters)
|
||||
if len(host_list) >= 1:
|
||||
return host_list['hostids'][0]
|
||||
except Exception, e:
|
||||
self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))
|
||||
|
||||
def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list):
|
||||
def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id):
|
||||
try:
|
||||
if self._module.check_mode:
|
||||
self._module.exit_json(changed=True)
|
||||
self._zapi.host.update({'hostid': host_id, 'groups': group_ids, 'status': status})
|
||||
parameters = {'hostid': host_id, 'groups': group_ids, 'status': status, 'proxy_hostid': proxy_id}
|
||||
self._zapi.host.update(parameters)
|
||||
interface_list_copy = exist_interface_list
|
||||
if interfaces:
|
||||
for interface in interfaces:
|
||||
|
@ -234,6 +243,14 @@ class Host(object):
|
|||
else:
|
||||
return host_list[0]
|
||||
|
||||
# get proxyid by proxy name
|
||||
def get_proxyid_by_proxy_name(self, proxy_name):
|
||||
proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}})
|
||||
if len(proxy_list) < 1:
|
||||
self._module.fail_json(msg="Proxy not found: %s" % proxy_name)
|
||||
else:
|
||||
return proxy_list[0]['proxyid']
|
||||
|
||||
# get group ids by group names
|
||||
def get_group_ids_by_group_names(self, group_names):
|
||||
group_ids = []
|
||||
|
@ -294,7 +311,7 @@ class Host(object):
|
|||
|
||||
# check all the properties before link or clear template
|
||||
def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
|
||||
exist_interfaces, host):
|
||||
exist_interfaces, host, proxy_id):
|
||||
# get the existing host's groups
|
||||
exist_host_groups = self.get_host_groups_by_host_id(host_id)
|
||||
if set(host_groups) != set(exist_host_groups):
|
||||
|
@ -314,6 +331,9 @@ class Host(object):
|
|||
if set(list(template_ids)) != set(exist_template_ids):
|
||||
return True
|
||||
|
||||
if host['proxy_hostid'] != proxy_id:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
# link or clear template of the host
|
||||
|
@ -349,7 +369,8 @@ def main():
|
|||
status=dict(default="enabled", choices=['enabled', 'disabled']),
|
||||
state=dict(default="present", choices=['present', 'absent']),
|
||||
timeout=dict(type='int', default=10),
|
||||
interfaces=dict(required=False)
|
||||
interfaces=dict(required=False),
|
||||
proxy=dict(required=False)
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
@ -367,6 +388,7 @@ def main():
|
|||
state = module.params['state']
|
||||
timeout = module.params['timeout']
|
||||
interfaces = module.params['interfaces']
|
||||
proxy = module.params['proxy']
|
||||
|
||||
# convert enabled to 0; disabled to 1
|
||||
status = 1 if status == "disabled" else 0
|
||||
|
@ -396,6 +418,11 @@ def main():
|
|||
if interface['type'] == 1:
|
||||
ip = interface['ip']
|
||||
|
||||
proxy_id = "0"
|
||||
|
||||
if proxy:
|
||||
proxy_id = host.get_proxyid_by_proxy_name(proxy)
|
||||
|
||||
# check if host exist
|
||||
is_host_exist = host.is_host_exist(host_name)
|
||||
|
||||
|
@ -421,10 +448,10 @@ def main():
|
|||
|
||||
if len(exist_interfaces) > interfaces_len:
|
||||
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
|
||||
exist_interfaces, zabbix_host_obj):
|
||||
exist_interfaces, zabbix_host_obj, proxy_id):
|
||||
host.link_or_clear_template(host_id, template_ids)
|
||||
host.update_host(host_name, group_ids, status, host_id,
|
||||
interfaces, exist_interfaces)
|
||||
interfaces, exist_interfaces, proxy_id)
|
||||
module.exit_json(changed=True,
|
||||
result="Successfully update host %s (%s) and linked with template '%s'"
|
||||
% (host_name, ip, link_templates))
|
||||
|
@ -432,8 +459,8 @@ def main():
|
|||
module.exit_json(changed=False)
|
||||
else:
|
||||
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
|
||||
exist_interfaces_copy, zabbix_host_obj):
|
||||
host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces)
|
||||
exist_interfaces_copy, zabbix_host_obj, proxy_id):
|
||||
host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id)
|
||||
host.link_or_clear_template(host_id, template_ids)
|
||||
module.exit_json(changed=True,
|
||||
result="Successfully update host %s (%s) and linked with template '%s'"
|
||||
|
@ -448,7 +475,7 @@ def main():
|
|||
module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)
|
||||
|
||||
# create host
|
||||
host_id = host.add_host(host_name, group_ids, status, interfaces)
|
||||
host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id)
|
||||
host.link_or_clear_template(host_id, template_ids)
|
||||
module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
|
||||
host_name, ip, link_templates))
|
||||
|
|
|
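The zabbix_host change above resolves the proxy name to an id once and only adds proxy_hostid to the create parameters when a proxy was requested. A small sketch of that parameter building, with the API client left out and an example proxy id:

# Mirrors the parameter handling added to add_host(); proxy_id comes from
# get_proxyid_by_proxy_name() in the real module.
def build_create_params(host_name, group_ids, status, interfaces, proxy_id=None):
    parameters = {'host': host_name, 'interfaces': interfaces,
                  'groups': group_ids, 'status': status}
    if proxy_id:
        parameters['proxy_hostid'] = proxy_id
    return parameters

print(build_create_params('web01', [{'groupid': '2'}], 0, [], proxy_id='10084'))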
@ -26,7 +26,7 @@ short_description: Create Zabbix maintenance windows
|
|||
description:
|
||||
- This module will let you create Zabbix maintenance windows.
|
||||
version_added: "1.8"
|
||||
author: '"Alexander Bulimov (@abulimov)" <lazywolf0@gmail.com>'
|
||||
author: "Alexander Bulimov (@abulimov)"
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- zabbix-api
|
||||
|
|
|
@ -28,7 +28,7 @@ version_added: 1.8
|
|||
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
|
||||
description:
|
||||
- Manage slb server objects on A10 Networks devices via aXAPI
|
||||
author: '"Mischa Peters (@mischapeters)" <mpeters@a10networks.com>'
|
||||
author: "Mischa Peters (@mischapeters)"
|
||||
notes:
|
||||
- Requires A10 Networks aXAPI 2.1
|
||||
options:
|
||||
|
|
|
@ -28,7 +28,7 @@ version_added: 1.8
|
|||
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
|
||||
description:
|
||||
- Manage slb service-group objects on A10 Networks devices via aXAPI
|
||||
author: '"Mischa Peters (@mischapeters)" <mpeters@a10networks.com>'
|
||||
author: "Mischa Peters (@mischapeters)"
|
||||
notes:
|
||||
- Requires A10 Networks aXAPI 2.1
|
||||
- When a server doesn't exist and is added to the service-group the server will be created
|
||||
|
|
|
@ -28,7 +28,7 @@ version_added: 1.8
|
|||
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
|
||||
description:
|
||||
- Manage slb virtual server objects on A10 Networks devices via aXAPI
|
||||
author: '"Mischa Peters (@mischapeters)" <mpeters@a10networks.com>'
|
||||
author: "Mischa Peters (@mischapeters)"
|
||||
notes:
|
||||
- Requires A10 Networks aXAPI 2.1
|
||||
requirements:
|
||||
|
|
|
@ -81,8 +81,8 @@ options:
|
|||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
|
||||
requirements: [ "urllib", "urllib2" ]
|
||||
author: '"Nandor Sivok (@dominis)" <nandor@gawker.com>'
|
||||
requirements: []
|
||||
author: "Nandor Sivok (@dominis)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -99,7 +99,7 @@ ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=api
|
|||
|
||||
import base64
|
||||
import socket
|
||||
|
||||
import urllib
|
||||
|
||||
class netscaler(object):
|
||||
|
||||
|
|
|
@ -86,7 +86,7 @@ notes:
|
|||
- The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP.
|
||||
- This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks.
|
||||
|
||||
requirements: [ urllib, urllib2, hashlib, hmac ]
|
||||
requirements: [ hashlib, hmac ]
|
||||
author: "Brice Burgess (@briceburg)"
|
||||
'''
|
||||
|
||||
|
@ -113,6 +113,8 @@ EXAMPLES = '''
|
|||
# DNSMadeEasy module specific support methods.
|
||||
#
|
||||
|
||||
import urllib
|
||||
|
||||
IMPORT_ERROR = None
|
||||
try:
|
||||
import json
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: "Collect facts from F5 BIG-IP devices"
|
|||
description:
|
||||
- "Collect facts from F5 BIG-IP devices via iControl SOAP API"
|
||||
version_added: "1.6"
|
||||
author: '"Matt Hite (@mhite)" <mhite@hotmail.com>'
|
||||
author: "Matt Hite (@mhite)"
|
||||
notes:
|
||||
- "Requires BIG-IP software version >= 11.4"
|
||||
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
|
||||
|
|
|
@ -27,7 +27,7 @@ short_description: "Manages F5 BIG-IP LTM http monitors"
|
|||
description:
|
||||
- "Manages F5 BIG-IP LTM monitors via iControl SOAP API"
|
||||
version_added: "1.4"
|
||||
author: '"Serge van Ginderachter (@srvg)" <serge@vanginderachter.be>'
|
||||
author: "Serge van Ginderachter (@srvg)"
|
||||
notes:
|
||||
- "Requires BIG-IP software version >= 11"
|
||||
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
|
||||
|
@ -163,35 +163,10 @@ EXAMPLES = '''
|
|||
name: "{{ monitorname }}"
|
||||
'''
|
||||
|
||||
try:
|
||||
import bigsuds
|
||||
except ImportError:
|
||||
bigsuds_found = False
|
||||
else:
|
||||
bigsuds_found = True
|
||||
|
||||
TEMPLATE_TYPE = 'TTYPE_HTTP'
|
||||
DEFAULT_PARENT_TYPE = 'http'
|
||||
|
||||
|
||||
# ===========================================
|
||||
# bigip_monitor module generic methods.
|
||||
# these should be re-useable for other monitor types
|
||||
#
|
||||
|
||||
def bigip_api(bigip, user, password):
|
||||
|
||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
|
||||
return api
|
||||
|
||||
|
||||
def disable_ssl_cert_validation():
|
||||
|
||||
# You probably only want to do this for testing and never in production.
|
||||
# From https://www.python.org/dev/peps/pep-0476/#id29
|
||||
import ssl
|
||||
ssl._create_default_https_context = ssl._create_unverified_context
|
||||
|
||||
|
||||
def check_monitor_exists(module, api, monitor, parent):
|
||||
|
||||
|
@ -278,7 +253,6 @@ def set_integer_property(api, monitor, int_property):
|
|||
|
||||
|
||||
def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
|
||||
|
||||
changed = False
|
||||
for str_property in template_string_properties:
|
||||
if str_property['value'] is not None and not check_string_property(api, monitor, str_property):
|
||||
|
@ -321,15 +295,8 @@ def set_ipport(api, monitor, ipport):
|
|||
def main():
|
||||
|
||||
# begin monitor specific stuff
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
server = dict(required=True),
|
||||
user = dict(required=True),
|
||||
password = dict(required=True),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
partition = dict(default='Common'),
|
||||
state = dict(default='present', choices=['present', 'absent']),
|
||||
argument_spec=f5_argument_spec();
|
||||
argument_spec.update( dict(
|
||||
name = dict(required=True),
|
||||
parent = dict(default=DEFAULT_PARENT_TYPE),
|
||||
parent_partition = dict(default='Common'),
|
||||
|
@ -341,20 +308,20 @@ def main():
|
|||
interval = dict(required=False, type='int'),
|
||||
timeout = dict(required=False, type='int'),
|
||||
time_until_up = dict(required=False, type='int', default=0)
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = argument_spec,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
server = module.params['server']
|
||||
user = module.params['user']
|
||||
password = module.params['password']
|
||||
validate_certs = module.params['validate_certs']
|
||||
partition = module.params['partition']
|
||||
(server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)
|
||||
|
||||
parent_partition = module.params['parent_partition']
|
||||
state = module.params['state']
|
||||
name = module.params['name']
|
||||
parent = "/%s/%s" % (parent_partition, module.params['parent'])
|
||||
monitor = "/%s/%s" % (partition, name)
|
||||
parent = fq_name(parent_partition, module.params['parent'])
|
||||
monitor = fq_name(partition, name)
|
||||
send = module.params['send']
|
||||
receive = module.params['receive']
|
||||
receive_disable = module.params['receive_disable']
|
||||
|
@ -366,11 +333,6 @@ def main():
|
|||
|
||||
# end monitor specific stuff
|
||||
|
||||
if not validate_certs:
|
||||
disable_ssl_cert_validation()
|
||||
|
||||
if not bigsuds_found:
|
||||
module.fail_json(msg="the python bigsuds module is required")
|
||||
api = bigip_api(server, user, password)
|
||||
monitor_exists = check_monitor_exists(module, api, monitor, parent)
|
||||
|
||||
|
@ -481,5 +443,6 @@ def main():
|
|||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.f5 import *
|
||||
main()
|
||||
|
||||
|
|
|
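The bigip_* refactors above move the shared server/user/password/partition handling into f5_argument_spec()/f5_parse_arguments() and replace hand-built "/%s/%s" paths with fq_name(). Those helpers live in ansible.module_utils.f5 and are not shown in this diff; the sketch below is only an assumption of what fq_name does, inferred from the call sites it replaces:

# Assumed behaviour of fq_name(), not the actual module_utils code.
def fq_name(partition, name):
    """Return a /Partition/name path, leaving already-qualified names alone."""
    if name is not None and not name.startswith('/'):
        return '/%s/%s' % (partition, name)
    return name

print(fq_name('Common', 'http_monitor'))  # /Common/http_monitor
print(fq_name('Common', '/Dev/custom'))   # /Dev/custom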
@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM tcp monitors"
|
|||
description:
|
||||
- "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API"
|
||||
version_added: "1.4"
|
||||
author: '"Serge van Ginderachter (@srvg)" <serge@vanginderachter.be>'
|
||||
author: "Serge van Ginderachter (@srvg)"
|
||||
notes:
|
||||
- "Requires BIG-IP software version >= 11"
|
||||
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
|
||||
|
@ -181,37 +181,11 @@ EXAMPLES = '''
|
|||
|
||||
'''
|
||||
|
||||
try:
|
||||
import bigsuds
|
||||
except ImportError:
|
||||
bigsuds_found = False
|
||||
else:
|
||||
bigsuds_found = True
|
||||
|
||||
TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP'
|
||||
TEMPLATE_TYPE_CHOICES = ['tcp', 'tcp_echo', 'tcp_half_open']
|
||||
DEFAULT_PARENT = DEFAULT_TEMPLATE_TYPE_CHOICE = DEFAULT_TEMPLATE_TYPE.replace('TTYPE_', '').lower()
|
||||
|
||||
|
||||
# ===========================================
|
||||
# bigip_monitor module generic methods.
|
||||
# these should be re-useable for other monitor types
|
||||
#
|
||||
|
||||
def bigip_api(bigip, user, password):
|
||||
|
||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
|
||||
return api
|
||||
|
||||
|
||||
def disable_ssl_cert_validation():
|
||||
|
||||
# You probably only want to do this for testing and never in production.
|
||||
# From https://www.python.org/dev/peps/pep-0476/#id29
|
||||
import ssl
|
||||
ssl._create_default_https_context = ssl._create_unverified_context
|
||||
|
||||
|
||||
def check_monitor_exists(module, api, monitor, parent):
|
||||
|
||||
# hack to determine if monitor exists
|
||||
|
@ -234,7 +208,7 @@ def check_monitor_exists(module, api, monitor, parent):
|
|||
|
||||
def create_monitor(api, monitor, template_attributes):
|
||||
|
||||
try:
|
||||
try:
|
||||
api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes])
|
||||
except bigsuds.OperationFailed, e:
|
||||
if "already exists" in str(e):
|
||||
|
@ -298,7 +272,6 @@ def set_integer_property(api, monitor, int_property):
|
|||
|
||||
|
||||
def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
|
||||
|
||||
changed = False
|
||||
for str_property in template_string_properties:
|
||||
if str_property['value'] is not None and not check_string_property(api, monitor, str_property):
|
||||
|
@ -341,15 +314,8 @@ def set_ipport(api, monitor, ipport):
|
|||
def main():
|
||||
|
||||
# begin monitor specific stuff
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
server = dict(required=True),
|
||||
user = dict(required=True),
|
||||
password = dict(required=True),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
partition = dict(default='Common'),
|
||||
state = dict(default='present', choices=['present', 'absent']),
|
||||
argument_spec=f5_argument_spec();
|
||||
argument_spec.update(dict(
|
||||
name = dict(required=True),
|
||||
type = dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES),
|
||||
parent = dict(default=DEFAULT_PARENT),
|
||||
|
@ -361,21 +327,21 @@ def main():
|
|||
interval = dict(required=False, type='int'),
|
||||
timeout = dict(required=False, type='int'),
|
||||
time_until_up = dict(required=False, type='int', default=0)
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = argument_spec,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
server = module.params['server']
|
||||
user = module.params['user']
|
||||
password = module.params['password']
|
||||
validate_certs = module.params['validate_certs']
|
||||
partition = module.params['partition']
|
||||
(server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)
|
||||
|
||||
parent_partition = module.params['parent_partition']
|
||||
state = module.params['state']
|
||||
name = module.params['name']
|
||||
type = 'TTYPE_' + module.params['type'].upper()
|
||||
parent = "/%s/%s" % (parent_partition, module.params['parent'])
|
||||
monitor = "/%s/%s" % (partition, name)
|
||||
parent = fq_name(parent_partition, module.params['parent'])
|
||||
monitor = fq_name(partition, name)
|
||||
send = module.params['send']
|
||||
receive = module.params['receive']
|
||||
ip = module.params['ip']
|
||||
|
@ -390,11 +356,6 @@ def main():
|
|||
|
||||
# end monitor specific stuff
|
||||
|
||||
if not validate_certs:
|
||||
disable_ssl_cert_validation()
|
||||
|
||||
if not bigsuds_found:
|
||||
module.fail_json(msg="the python bigsuds module is required")
|
||||
api = bigip_api(server, user, password)
|
||||
monitor_exists = check_monitor_exists(module, api, monitor, parent)
|
||||
|
||||
|
@ -506,5 +467,6 @@ def main():
|
|||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.f5 import *
|
||||
main()
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM nodes"
|
|||
description:
|
||||
- "Manages F5 BIG-IP LTM nodes via iControl SOAP API"
|
||||
version_added: "1.4"
|
||||
author: '"Matt Hite (@mhite)" <mhite@hotmail.com>'
|
||||
author: "Matt Hite (@mhite)"
|
||||
notes:
|
||||
- "Requires BIG-IP software version >= 11"
|
||||
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
|
||||
|
@ -188,27 +188,6 @@ EXAMPLES = '''
|
|||
|
||||
'''
|
||||
|
||||
try:
|
||||
import bigsuds
|
||||
except ImportError:
|
||||
bigsuds_found = False
|
||||
else:
|
||||
bigsuds_found = True
|
||||
|
||||
# ==========================
|
||||
# bigip_node module specific
|
||||
#
|
||||
|
||||
def bigip_api(bigip, user, password):
|
||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
|
||||
return api
|
||||
|
||||
def disable_ssl_cert_validation():
|
||||
# You probably only want to do this for testing and never in production.
|
||||
# From https://www.python.org/dev/peps/pep-0476/#id29
|
||||
import ssl
|
||||
ssl._create_default_https_context = ssl._create_unverified_context
|
||||
|
||||
def node_exists(api, address):
|
||||
# hack to determine if node exists
|
||||
result = False
|
||||
|
@ -283,42 +262,30 @@ def get_node_monitor_status(api, name):
|
|||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
server = dict(type='str', required=True),
|
||||
user = dict(type='str', required=True),
|
||||
password = dict(type='str', required=True),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
state = dict(type='str', default='present', choices=['present', 'absent']),
|
||||
argument_spec=f5_argument_spec();
|
||||
argument_spec.update(dict(
|
||||
session_state = dict(type='str', choices=['enabled', 'disabled']),
|
||||
monitor_state = dict(type='str', choices=['enabled', 'disabled']),
|
||||
partition = dict(type='str', default='Common'),
|
||||
name = dict(type='str', required=True),
|
||||
host = dict(type='str', aliases=['address', 'ip']),
|
||||
description = dict(type='str')
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = argument_spec,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
if not bigsuds_found:
|
||||
module.fail_json(msg="the python bigsuds module is required")
|
||||
(server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)
|
||||
|
||||
server = module.params['server']
|
||||
user = module.params['user']
|
||||
password = module.params['password']
|
||||
validate_certs = module.params['validate_certs']
|
||||
state = module.params['state']
|
||||
session_state = module.params['session_state']
|
||||
monitor_state = module.params['monitor_state']
|
||||
partition = module.params['partition']
|
||||
host = module.params['host']
|
||||
name = module.params['name']
|
||||
address = "/%s/%s" % (partition, name)
|
||||
address = fq_name(partition, name)
|
||||
description = module.params['description']
|
||||
|
||||
if not validate_certs:
|
||||
disable_ssl_cert_validation()
|
||||
|
||||
if state == 'absent' and host is not None:
|
||||
module.fail_json(msg="host parameter invalid when state=absent")
|
||||
|
||||
|
@ -410,5 +377,6 @@ def main():
|
|||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.f5 import *
|
||||
main()
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM pools"
|
|||
description:
|
||||
- "Manages F5 BIG-IP LTM pools via iControl SOAP API"
|
||||
version_added: "1.2"
|
||||
author: '"Matt Hite (@mhite)" <mhite@hotmail.com>'
|
||||
author: "Matt Hite (@mhite)"
|
||||
notes:
|
||||
- "Requires BIG-IP software version >= 11"
|
||||
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
|
||||
|
@ -228,27 +228,6 @@ EXAMPLES = '''
|
|||
|
||||
'''
|
||||
|
||||
try:
|
||||
import bigsuds
|
||||
except ImportError:
|
||||
bigsuds_found = False
|
||||
else:
|
||||
bigsuds_found = True
|
||||
|
||||
# ===========================================
|
||||
# bigip_pool module specific support methods.
|
||||
#
|
||||
|
||||
def bigip_api(bigip, user, password):
|
||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
|
||||
return api
|
||||
|
||||
def disable_ssl_cert_validation():
|
||||
# You probably only want to do this for testing and never in production.
|
||||
# From https://www.python.org/dev/peps/pep-0476/#id29
|
||||
import ssl
|
||||
ssl._create_default_https_context = ssl._create_unverified_context
|
||||
|
||||
def pool_exists(api, pool):
|
||||
# hack to determine if pool exists
|
||||
result = False
|
||||
|
@ -368,15 +347,9 @@ def main():
|
|||
|
||||
service_down_choices = ['none', 'reset', 'drop', 'reselect']
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
server = dict(type='str', required=True),
|
||||
user = dict(type='str', required=True),
|
||||
password = dict(type='str', required=True),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
state = dict(type='str', default='present', choices=['present', 'absent']),
|
||||
argument_spec=f5_argument_spec();
|
||||
argument_spec.update(dict(
|
||||
name = dict(type='str', required=True, aliases=['pool']),
|
||||
partition = dict(type='str', default='Common'),
|
||||
lb_method = dict(type='str', choices=lb_method_choices),
|
||||
monitor_type = dict(type='str', choices=monitor_type_choices),
|
||||
quorum = dict(type='int'),
|
||||
|
@ -385,21 +358,18 @@ def main():
|
|||
service_down_action = dict(type='str', choices=service_down_choices),
|
||||
host = dict(type='str', aliases=['address']),
|
||||
port = dict(type='int')
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = argument_spec,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
if not bigsuds_found:
|
||||
module.fail_json(msg="the python bigsuds module is required")
|
||||
(server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)
|
||||
|
||||
server = module.params['server']
|
||||
user = module.params['user']
|
||||
password = module.params['password']
|
||||
validate_certs = module.params['validate_certs']
|
||||
state = module.params['state']
|
||||
name = module.params['name']
|
||||
partition = module.params['partition']
|
||||
pool = "/%s/%s" % (partition, name)
|
||||
pool = fq_name(partition,name)
|
||||
lb_method = module.params['lb_method']
|
||||
if lb_method:
|
||||
lb_method = lb_method.lower()
|
||||
|
@ -411,16 +381,13 @@ def main():
|
|||
if monitors:
|
||||
monitors = []
|
||||
for monitor in module.params['monitors']:
|
||||
if "/" not in monitor:
|
||||
monitors.append("/%s/%s" % (partition, monitor))
|
||||
else:
|
||||
monitors.append(monitor)
|
||||
monitors.append(fq_name(partition, monitor))
|
||||
slow_ramp_time = module.params['slow_ramp_time']
|
||||
service_down_action = module.params['service_down_action']
|
||||
if service_down_action:
|
||||
service_down_action = service_down_action.lower()
|
||||
host = module.params['host']
|
||||
address = "/%s/%s" % (partition, host)
|
||||
address = fq_name(partition,host)
|
||||
port = module.params['port']
|
||||
|
||||
if not validate_certs:
|
||||
|
@ -551,5 +518,6 @@ def main():
|
|||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.f5 import *
|
||||
main()
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ short_description: "Manages F5 BIG-IP LTM pool members"
|
|||
description:
|
||||
- "Manages F5 BIG-IP LTM pool members via iControl SOAP API"
|
||||
version_added: "1.4"
|
||||
author: '"Matt Hite (@mhite)" <mhite@hotmail.com>'
|
||||
author: "Matt Hite (@mhite)"
|
||||
notes:
|
||||
- "Requires BIG-IP software version >= 11"
|
||||
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
|
||||
|
@ -196,27 +196,6 @@ EXAMPLES = '''
|
|||
|
||||
'''
|
||||
|
||||
try:
|
||||
import bigsuds
|
||||
except ImportError:
|
||||
bigsuds_found = False
|
||||
else:
|
||||
bigsuds_found = True
|
||||
|
||||
# ===========================================
|
||||
# bigip_pool_member module specific support methods.
|
||||
#
|
||||
|
||||
def bigip_api(bigip, user, password):
|
||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
|
||||
return api
|
||||
|
||||
def disable_ssl_cert_validation():
|
||||
# You probably only want to do this for testing and never in production.
|
||||
# From https://www.python.org/dev/peps/pep-0476/#id29
|
||||
import ssl
|
||||
ssl._create_default_https_context = ssl._create_unverified_context
|
||||
|
||||
def pool_exists(api, pool):
|
||||
# hack to determine if pool exists
|
||||
result = False
|
||||
|
@ -327,49 +306,37 @@ def get_member_monitor_status(api, pool, address, port):
|
|||
return result
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
server = dict(type='str', required=True),
|
||||
user = dict(type='str', required=True),
|
||||
password = dict(type='str', required=True),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
state = dict(type='str', default='present', choices=['present', 'absent']),
|
||||
argument_spec = f5_argument_spec();
|
||||
argument_spec.update(dict(
|
||||
session_state = dict(type='str', choices=['enabled', 'disabled']),
|
||||
monitor_state = dict(type='str', choices=['enabled', 'disabled']),
|
||||
pool = dict(type='str', required=True),
|
||||
partition = dict(type='str', default='Common'),
|
||||
host = dict(type='str', required=True, aliases=['address', 'name']),
|
||||
port = dict(type='int', required=True),
|
||||
connection_limit = dict(type='int'),
|
||||
description = dict(type='str'),
|
||||
rate_limit = dict(type='int'),
|
||||
ratio = dict(type='int')
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = argument_spec,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
if not bigsuds_found:
|
||||
module.fail_json(msg="the python bigsuds module is required")
|
||||
|
||||
server = module.params['server']
|
||||
user = module.params['user']
|
||||
password = module.params['password']
|
||||
validate_certs = module.params['validate_certs']
|
||||
state = module.params['state']
|
||||
(server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)
|
||||
session_state = module.params['session_state']
|
||||
monitor_state = module.params['monitor_state']
|
||||
partition = module.params['partition']
|
||||
pool = "/%s/%s" % (partition, module.params['pool'])
|
||||
pool = fq_name(partition, module.params['pool'])
|
||||
connection_limit = module.params['connection_limit']
|
||||
description = module.params['description']
|
||||
rate_limit = module.params['rate_limit']
|
||||
ratio = module.params['ratio']
|
||||
host = module.params['host']
|
||||
address = "/%s/%s" % (partition, host)
|
||||
address = fq_name(partition, host)
|
||||
port = module.params['port']
|
||||
|
||||
if not validate_certs:
|
||||
disable_ssl_cert_validation()
|
||||
|
||||
# sanity check user supplied values
|
||||
|
||||
|
@ -457,5 +424,6 @@ def main():
|
|||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.f5 import *
|
||||
main()
|
||||
|
||||
|
|
|
@ -68,6 +68,24 @@ options:
|
|||
- When disabling server, immediately terminate all the sessions attached to the specified server. This can be used to terminate long-running sessions after a server is put into maintenance mode, for instance.
|
||||
required: false
|
||||
default: false
|
||||
wait:
|
||||
description:
|
||||
- Wait until the server reports a status of 'UP' when state=enabled, or status of 'MAINT' when state=disabled
|
||||
required: false
|
||||
default: false
|
||||
version_added: "2.0"
|
||||
wait_retries:
|
||||
description:
|
||||
- number of times to check for status after changing the state
|
||||
required: false
|
||||
default: 25
|
||||
version_added: "2.0"
|
||||
wait_interval:
|
||||
description:
|
||||
- number of seconds to wait between retries
|
||||
required: false
|
||||
default: 5
|
||||
version_added: "2.0"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -82,24 +100,37 @@ examples:
|
|||
# disable server, provide socket file
|
||||
- haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www
|
||||
|
||||
# disable server, provide socket file, wait until status reports in maintenance
|
||||
- haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www wait=yes
|
||||
|
||||
# disable backend server in 'www' backend pool and drop open sessions to it
|
||||
- haproxy: state=disabled host={{ inventory_hostname }} backend=www socket=/var/run/haproxy.sock shutdown_sessions=true
|
||||
|
||||
# enable server in 'www' backend pool
|
||||
- haproxy: state=enabled host={{ inventory_hostname }} backend=www
|
||||
|
||||
# enable server in 'www' backend pool wait until healthy
|
||||
- haproxy: state=enabled host={{ inventory_hostname }} backend=www wait=yes
|
||||
|
||||
# enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health
|
||||
- haproxy: state=enabled host={{ inventory_hostname }} backend=www wait=yes wait_retries=10 wait_interval=5
|
||||
|
||||
# enable server in 'www' backend pool with change server(s) weight
|
||||
- haproxy: state=enabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock weight=10 backend=www
|
||||
|
||||
author: "Ravi Bhure (@ravibhure)" <ravibhure@gmail.com>
|
||||
author: "Ravi Bhure (@ravibhure)"
|
||||
'''
|
||||
|
||||
import socket
|
||||
import csv
|
||||
import time
|
||||
|
||||
|
||||
DEFAULT_SOCKET_LOCATION="/var/run/haproxy.sock"
|
||||
RECV_SIZE = 1024
|
||||
ACTION_CHOICES = ['enabled', 'disabled']
|
||||
WAIT_RETRIES=25
|
||||
WAIT_INTERVAL=5
|
||||
|
||||
######################################################################
|
||||
class TimeoutException(Exception):
|
||||
|
@ -126,10 +157,12 @@ class HAProxy(object):
|
|||
self.weight = self.module.params['weight']
|
||||
self.socket = self.module.params['socket']
|
||||
self.shutdown_sessions = self.module.params['shutdown_sessions']
|
||||
|
||||
self.wait = self.module.params['wait']
|
||||
self.wait_retries = self.module.params['wait_retries']
|
||||
self.wait_interval = self.module.params['wait_interval']
|
||||
self.command_results = []
|
||||
|
||||
def execute(self, cmd, timeout=200):
|
||||
def execute(self, cmd, timeout=200, capture_output=True):
|
||||
"""
|
||||
Executes a HAProxy command by sending a message to a HAProxy's local
|
||||
UNIX socket and waiting up to 'timeout' milliseconds for the response.
|
||||
|
@ -144,10 +177,35 @@ class HAProxy(object):
|
|||
while buf:
|
||||
result += buf
|
||||
buf = self.client.recv(RECV_SIZE)
|
||||
self.command_results = result.strip()
|
||||
if capture_output:
|
||||
self.command_results = result.strip()
|
||||
self.client.close()
|
||||
return result
|
||||
|
||||
def wait_until_status(self, pxname, svname, status):
|
||||
"""
|
||||
Wait for a service to reach the specified status. Try RETRIES times
|
||||
with INTERVAL seconds of sleep in between. If the service has not reached
|
||||
the expected status in that time, the module will fail. If the service was
|
||||
not found, the module will fail.
|
||||
"""
|
||||
for i in range(1, self.wait_retries):
|
||||
data = self.execute('show stat', 200, False).lstrip('# ')
|
||||
r = csv.DictReader(data.splitlines())
|
||||
found = False
|
||||
for row in r:
|
||||
if row['pxname'] == pxname and row['svname'] == svname:
|
||||
found = True
|
||||
if row['status'] == status:
|
||||
return True
|
||||
else:
|
||||
time.sleep(self.wait_interval)
|
||||
|
||||
if not found:
|
||||
self.module.fail_json(msg="unable to find server %s/%s" % (pxname, svname))
|
||||
|
||||
self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." % (pxname, svname, status, self.wait_retries))
|
||||
|
||||
def enabled(self, host, backend, weight):
|
||||
"""
|
||||
Enabled action, marks server to UP and checks are re-enabled,
|
||||
|
@ -170,6 +228,8 @@ class HAProxy(object):
|
|||
if weight:
|
||||
cmd += "; set weight %s/%s %s" % (pxname, svname, weight)
|
||||
self.execute(cmd)
|
||||
if self.wait:
|
||||
self.wait_until_status(pxname, svname, 'UP')
|
||||
|
||||
else:
|
||||
pxname = backend
|
||||
|
@ -177,6 +237,8 @@ class HAProxy(object):
|
|||
if weight:
|
||||
cmd += "; set weight %s/%s %s" % (pxname, svname, weight)
|
||||
self.execute(cmd)
|
||||
if self.wait:
|
||||
self.wait_until_status(pxname, svname, 'UP')
|
||||
|
||||
def disabled(self, host, backend, shutdown_sessions):
|
||||
"""
|
||||
|
@ -200,6 +262,8 @@ class HAProxy(object):
|
|||
if shutdown_sessions:
|
||||
cmd += "; shutdown sessions server %s/%s" % (pxname, svname)
|
||||
self.execute(cmd)
|
||||
if self.wait:
|
||||
self.wait_until_status(pxname, svname, 'MAINT')
|
||||
|
||||
else:
|
||||
pxname = backend
|
||||
|
@ -207,6 +271,8 @@ class HAProxy(object):
|
|||
if shutdown_sessions:
|
||||
cmd += "; shutdown sessions server %s/%s" % (pxname, svname)
|
||||
self.execute(cmd)
|
||||
if self.wait:
|
||||
self.wait_until_status(pxname, svname, 'MAINT')
|
||||
|
||||
def act(self):
|
||||
"""
|
||||
|
@ -236,6 +302,9 @@ def main():
|
|||
weight=dict(required=False, default=None),
|
||||
socket = dict(required=False, default=DEFAULT_SOCKET_LOCATION),
|
||||
shutdown_sessions=dict(required=False, default=False),
|
||||
wait=dict(required=False, default=False, type='bool'),
|
||||
wait_retries=dict(required=False, default=WAIT_RETRIES, type='int'),
|
||||
wait_interval=dict(required=False, default=WAIT_INTERVAL, type='int'),
|
||||
),
|
||||
|
||||
)
|
||||
|
|
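The new wait support in the haproxy module issues `show stat` over the admin socket and scans the CSV it returns for the backend/server row. A standalone sketch of that lookup against a made-up stats sample:

import csv

# Parse `show stat` output (CSV with a leading '# ') and return the row's status.
def server_status(stat_output, pxname, svname):
    reader = csv.DictReader(stat_output.lstrip('# ').splitlines())
    for row in reader:
        if row['pxname'] == pxname and row['svname'] == svname:
            return row['status']
    return None

sample = "# pxname,svname,status\nwww,web01,UP\nwww,web02,MAINT\n"
print(server_status(sample, 'www', 'web02'))  # MAINT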
1070
network/nmcli.py
Normal file
File diff suppressed because it is too large
|
@ -22,7 +22,7 @@ DOCUMENTATION = '''
|
|||
---
|
||||
module: openvswitch_bridge
|
||||
version_added: 1.4
|
||||
author: '"David Stygstra (@stygstra)" <david.stygstra@gmail.com>'
|
||||
author: "David Stygstra (@stygstra)"
|
||||
short_description: Manage Open vSwitch bridges
|
||||
requirements: [ ovs-vsctl ]
|
||||
description:
|
||||
|
|
|
@ -22,7 +22,7 @@ DOCUMENTATION = '''
|
|||
---
|
||||
module: openvswitch_port
|
||||
version_added: 1.4
|
||||
author: '"David Stygstra (@stygstra)" <david.stygstra@gmail.com>'
|
||||
author: "David Stygstra (@stygstra)"
|
||||
short_description: Manage Open vSwitch ports
|
||||
requirements: [ ovs-vsctl ]
|
||||
description:
|
||||
|
|
|
@ -43,7 +43,7 @@ options:
|
|||
|
||||
# informational: requirements for nodes
|
||||
requirements: [ urllib2, cgi ]
|
||||
author: '"Adam Garside (@fabulops)" <adam.garside@gmail.com>'
|
||||
author: "Adam Garside (@fabulops)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
|
|
@ -22,7 +22,7 @@ DOCUMENTATION = '''
|
|||
---
|
||||
module: flowdock
|
||||
version_added: "1.2"
|
||||
author: '"Matt Coddington (@mcodd)" <coddington@gmail.com>'
|
||||
author: "Matt Coddington (@mcodd)"
|
||||
short_description: Send a message to a flowdock
|
||||
description:
|
||||
- Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
|
||||
|
@ -85,8 +85,7 @@ options:
|
|||
choices: ['yes', 'no']
|
||||
version_added: 1.5.1
|
||||
|
||||
# informational: requirements for nodes
|
||||
requirements: [ urllib, urllib2 ]
|
||||
requirements: [ ]
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -104,6 +103,8 @@ EXAMPLES = '''
|
|||
tags=tag1,tag2,tag3
|
||||
'''
|
||||
|
||||
import urllib
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
|
|
@ -39,7 +39,7 @@ options:
|
|||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
version_added: 1.5.1
|
||||
author: '"Jonas Pfenniger (@zimbatm)" <zimbatm@zimbatm.com>'
|
||||
author: "Jonas Pfenniger (@zimbatm)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -49,6 +49,8 @@ EXAMPLES = '''
|
|||
message=deployed {{ target }}
|
||||
'''
|
||||
|
||||
import urllib
|
||||
|
||||
BASE_URL = 'https://grove.io/api/notice/%s/'
|
||||
|
||||
# ==============================================================
|
||||
|
|
97
notification/hall.py
Executable file
@ -0,0 +1,97 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2015, Billy Kimble <basslines@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
DOCUMENTATION = """
|
||||
module: hall
|
||||
short_description: Send notification to Hall
|
||||
description:
|
||||
- "The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notication messages to rooms."
|
||||
version_added: "2.0"
|
||||
author: Billy Kimble (@bkimble) <basslines@gmail.com>
|
||||
options:
|
||||
room_token:
|
||||
description:
|
||||
- "Room token provided to you by setting up the Ansible room integation on U(https://hall.com)"
|
||||
required: true
|
||||
msg:
|
||||
description:
|
||||
- The message you wish to deliver as a notification
|
||||
required: true
|
||||
title:
|
||||
description:
|
||||
- The title of the message
|
||||
required: true
|
||||
picture:
|
||||
description:
|
||||
- "The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)"
|
||||
required: false
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
- name: Send Hall notification
|
||||
local_action:
|
||||
module: hall
|
||||
room_token: <hall room integration token>
|
||||
title: Nginx
|
||||
msg: Created virtual host file on {{ inventory_hostname }}
|
||||
|
||||
- name: Send Hall notification if EC2 servers were created.
|
||||
when: ec2.instances|length > 0
|
||||
local_action:
|
||||
module: hall
|
||||
room_token: <hall room integration token>
|
||||
title: Server Creation
|
||||
msg: "Created EC2 instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region."
|
||||
with_items: ec2.instances
|
||||
"""
|
||||
|
||||
HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s'
|
||||
|
||||
def send_request_to_hall(module, room_token, payload):
|
||||
headers = {'Content-Type': 'application/json'}
|
||||
payload=module.jsonify(payload)
|
||||
api_endpoint = HALL_API_ENDPOINT % (room_token)
|
||||
response, info = fetch_url(module, api_endpoint, data=payload, headers=headers)
|
||||
if info['status'] != 200:
|
||||
secure_url = HALL_API_ENDPOINT % ('[redacted]')
|
||||
module.fail_json(msg=" failed to send %s to %s: %s" % (payload, secure_url, info['msg']))
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
room_token = dict(type='str', required=True),
|
||||
msg = dict(type='str', required=True),
|
||||
title = dict(type='str', required=True),
|
||||
picture = dict(type='str', default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'),
|
||||
)
|
||||
)
|
||||
|
||||
room_token = module.params['room_token']
|
||||
message = module.params['msg']
|
||||
title = module.params['title']
|
||||
picture = module.params['picture']
|
||||
payload = {'title': title, 'message': message, 'picture': picture}
|
||||
send_request_to_hall(module, room_token, payload)
|
||||
module.exit_json(msg="OK")
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
main()
|
|
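For orientation, the request above is nothing more than a JSON POST to the Hall generic-services endpoint. A minimal standalone sketch of the same call, using plain urllib2 instead of Ansible's fetch_url (the room token and text are placeholders, and it is assumed the endpoint accepts a raw JSON body exactly as hall.py sends it):

# Standalone sketch of the request hall.py makes through fetch_url().
# The token and text are placeholders; error handling is deliberately minimal.
import json
import urllib2

HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s'

def send_hall_notice(room_token, title, message, picture=None):
    payload = json.dumps({'title': title, 'message': message, 'picture': picture})
    request = urllib2.Request(HALL_API_ENDPOINT % room_token, data=payload,
                              headers={'Content-Type': 'application/json'})
    return urllib2.urlopen(request).getcode()

send_hall_notice('<hall room integration token>', 'Nginx', 'Created virtual host file')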
@@ -62,8 +62,7 @@ options:
    version_added: 1.6.0

# informational: requirements for nodes
requirements: [ urllib, urllib2 ]
requirements: [ ]
author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul (@pb8226)"
'''

@@ -75,6 +74,8 @@ EXAMPLES = '''
# HipChat module specific support methods.
#

import urllib

DEFAULT_URI = "https://api.hipchat.com/v1"

MSG_URI_V1 = "/rooms/message"
@@ -47,6 +47,12 @@ options:
      - The message body.
    required: true
    default: null
  topic:
    description:
      - Set the channel topic
    required: false
    default: null
    version_added: 2.0
  color:
    description:
      - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none").

@@ -106,7 +112,7 @@ import ssl
from time import sleep


def send_msg(channel, msg, server='localhost', port='6667', key=None,
def send_msg(channel, msg, server='localhost', port='6667', key=None, topic=None,
             nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False):
    '''send message to IRC'''

@@ -163,6 +169,10 @@ def send_msg(channel, msg, server='localhost', port='6667', key=None,
            raise Exception('Timeout waiting for IRC JOIN response')
        sleep(0.5)

    if topic is not None:
        irc.send('TOPIC %s :%s\r\n' % (channel, topic))
        sleep(1)

    irc.send('PRIVMSG %s :%s\r\n' % (channel, message))
    sleep(1)
    irc.send('PART %s\r\n' % channel)

@@ -186,6 +196,7 @@ def main():
                          "blue", "black", "none"]),
        channel=dict(required=True),
        key=dict(),
        topic=dict(),
        passwd=dict(),
        timeout=dict(type='int', default=30),
        use_ssl=dict(type='bool', default=False)

@@ -196,6 +207,7 @@ def main():
    server = module.params["server"]
    port = module.params["port"]
    nick = module.params["nick"]
    topic = module.params["topic"]
    msg = module.params["msg"]
    color = module.params["color"]
    channel = module.params["channel"]

@@ -205,7 +217,7 @@ def main():
    use_ssl = module.params["use_ssl"]

    try:
        send_msg(channel, msg, server, port, key, nick, color, passwd, timeout, use_ssl)
        send_msg(channel, msg, server, port, key, topic, nick, color, passwd, timeout, use_ssl)
    except Exception, e:
        module.fail_json(msg="unable to send to IRC: %s" % e)
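The topic support added above amounts to one extra raw command sent to the server before the PRIVMSG. A minimal sketch of that exchange over a plain socket, outside the module (server, channel and nick are placeholders; key, password, SSL and PING/PONG handling are omitted, so some servers will need more than this):

# Sketch of the TOPIC-then-PRIVMSG sequence the irc module now performs.
import socket
from time import sleep

def notify_irc(server, port, channel, nick, msg, topic=None):
    irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    irc.connect((server, int(port)))
    irc.send('NICK %s\r\n' % nick)
    irc.send('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick))
    sleep(1)
    irc.send('JOIN %s\r\n' % channel)
    sleep(1)
    if topic is not None:
        # the new option: set the channel topic before delivering the message
        irc.send('TOPIC %s :%s\r\n' % (channel, topic))
        sleep(1)
    irc.send('PRIVMSG %s :%s\r\n' % (channel, msg))
    sleep(1)
    irc.send('PART %s\r\n' % channel)
    irc.send('QUIT\r\n')
    irc.close()

notify_irc('irc.example.org', 6667, '#deploys', 'ansible', 'deployed', topic='release 1.2.3')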
@@ -20,7 +20,7 @@

DOCUMENTATION = """
---
author: '"Dag Wieers (@dagwieers)" <dag@wieers.com>'
author: "Dag Wieers (@dagwieers)"
module: mail
short_description: Send an email
description:

@@ -110,6 +110,12 @@ options:
      - The character set of email being sent
    default: 'us-ascii'
    required: false
  subtype:
    description:
      - The minor mime type, can be either text or html. The major type is always text.
    default: 'plain'
    required: false
    version_added: "2.0"
"""

EXAMPLES = '''

@@ -183,7 +189,8 @@ def main():
            body = dict(default=None),
            attach = dict(default=None),
            headers = dict(default=None),
            charset = dict(default='us-ascii')
            charset = dict(default='us-ascii'),
            subtype = dict(default='plain')
        )
    )

@@ -200,6 +207,7 @@ def main():
    attach_files = module.params.get('attach')
    headers = module.params.get('headers')
    charset = module.params.get('charset')
    subtype = module.params.get('subtype')
    sender_phrase, sender_addr = parseaddr(sender)

    if not body:

@@ -259,7 +267,7 @@ def main():
    if len(cc_list) > 0:
        msg['Cc'] = ", ".join(cc_list)

    part = MIMEText(body + "\n\n", _charset=charset)
    part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
    msg.attach(part)

    if attach_files is not None:
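The new subtype option ends up as the _subtype argument to MIMEText, which is what lets the mail module send HTML bodies. A small standalone sketch of the same construction (addresses and the SMTP host are placeholders):

# Sketch of what subtype=html changes: the _subtype passed to MIMEText.
import smtplib
from email.mime.text import MIMEText

body = "<p>Deployment <b>finished</b> on web01.</p>"
part = MIMEText(body, _subtype='html', _charset='us-ascii')  # default stays 'plain'
part['Subject'] = 'Deployment report'
part['From'] = 'ansible@example.com'
part['To'] = 'ops@example.com'

smtp = smtplib.SMTP('localhost')
smtp.sendmail(part['From'], [part['To']], part.as_string())
smtp.quit()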
@@ -24,7 +24,7 @@ short_description: Send a SMS via nexmo
description:
    - Send a SMS message via nexmo
version_added: 1.6
author: '"Matt Martz (@sivel)" <matt@sivel.net>'
author: "Matt Martz (@sivel)"
options:
  api_key:
    description:

@@ -71,6 +71,7 @@ EXAMPLES = """
    msg: "{{ inventory_hostname }} completed"
"""

import urllib

NEXMO_API = 'https://rest.nexmo.com/sms/json'
@@ -48,7 +48,7 @@ options:
    description: Message priority (see u(https://pushover.net) for details.)
    required: false

author: '"Jim Richardson (@weaselkeeper)" <weaselkeeper@gmail.com>'
author: "Jim Richardson (@weaselkeeper)"
'''

EXAMPLES = '''
@@ -53,7 +53,7 @@ options:
      the desired subject for the email
    required: true

author: '"Matt Makai (@makaimc)" <matthew.makai@gmail.com>'
author: "Matt Makai (@makaimc)"
'''

EXAMPLES = '''

@@ -84,10 +84,8 @@ EXAMPLES = '''
# =======================================
# sendgrid module support methods
#
try:
    import urllib, urllib2
except ImportError:
    module.fail_json(msg="urllib and urllib2 are required")
import urllib
import urllib2

import base64
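The try/except block being deleted here (and the identical ones in the twilio and typetalk hunks below) could never report a clean error: at import time no AnsibleModule object exists yet, so the module.fail_json call in the except branch would itself raise a NameError, and urllib/urllib2 are part of the standard library anyway. Where a genuinely optional dependency does need guarding, the usual shape is to record the import failure and report it once main() has a module object; a sketch under that assumption ('somelib' is a placeholder dependency, not something used in this diff):

# Sketch of the conventional optional-import pattern for Ansible modules.
from ansible.module_utils.basic import AnsibleModule

try:
    import somelib  # placeholder third-party dependency
    HAS_SOMELIB = True
except ImportError:
    HAS_SOMELIB = False

def main():
    module = AnsibleModule(argument_spec=dict())
    if not HAS_SOMELIB:
        # fail_json is only usable once the AnsibleModule object exists
        module.fail_json(msg="the somelib Python library is required for this module")
    module.exit_json(changed=False)

if __name__ == '__main__':
    main()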
@@ -24,7 +24,7 @@ short_description: Send Slack notifications
description:
    - The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration
version_added: 1.6
author: '"Ramon de la Fuente (@ramondelafuente)" <ramon@delafuente.nl>'
author: "Ramon de la Fuente (@ramondelafuente)"
options:
  domain:
    description:
@@ -24,7 +24,7 @@ short_description: Send Amazon Simple Notification Service (SNS) messages
description:
    - The M(sns) module sends notifications to a topic on your Amazon SNS account
version_added: 1.6
author: '"Michael J. Schultz (@mjschultz)" <mjschultz@gmail.com>'
author: "Michael J. Schultz (@mjschultz)"
options:
  msg:
    description:
@@ -58,7 +58,7 @@ options:
      (multimedia message) instead of a plain SMS
    required: false

author: '"Matt Makai (@makaimc)" <matthew.makai@gmail.com>'
author: "Matt Makai (@makaimc)"
'''

EXAMPLES = '''

@@ -104,10 +104,8 @@ EXAMPLES = '''
# =======================================
# twilio module support methods
#
try:
    import urllib, urllib2
except ImportError:
    module.fail_json(msg="urllib and urllib2 are required")
import urllib
import urllib2

import base64
@@ -25,23 +25,17 @@ options:
    description:
      - message body
    required: true
requirements: [ urllib, urllib2, json ]
author: '"Takashi Someda (@tksmd)" <someda@isenshi.com>'
requirements: [ json ]
author: "Takashi Someda (@tksmd)"
'''

EXAMPLES = '''
- typetalk: client_id=12345 client_secret=12345 topic=1 msg="install completed"
'''

try:
    import urllib
except ImportError:
    urllib = None
import urllib

try:
    import urllib2
except ImportError:
    urllib2 = None
import urllib2

try:
    import json

@@ -96,8 +90,8 @@ def main():
        supports_check_mode=False
    )

    if not (urllib and urllib2 and json):
        module.fail_json(msg="urllib, urllib2 and json modules are required")
    if not json:
        module.fail_json(msg="json module is required")

    client_id = module.params["client_id"]
    client_secret = module.params["client_secret"]
Some files were not shown because too many files have changed in this diff.