Merge branch 'devel' of github.com:ansible/ansible into devel
Conflicts: library/monitoring/pagerduty
This commit is contained in:
commit
9a9924b975
161 changed files with 12788 additions and 4984 deletions
cloud
cloudformationdigital_oceandigital_ocean_domaindigital_ocean_sshkeydockerdocker_imageec2ec2_amiec2_ami_searchec2_asgec2_eipec2_elbec2_elb_lbec2_factsec2_groupec2_keyec2_lcec2_metric_alarmec2_scaling_policyec2_snapshotec2_tagec2_volec2_vpcelasticachegc_storagegcegce_lbgce_netgce_pdkeystone_usernova_computenova_keypairquantum_floating_ipquantum_subnetraxrax_cbsrax_cbs_attachmentsrax_clbrax_clb_nodesrax_dnsrax_dns_recordrax_factsrax_filesrax_files_objectsrax_identityrax_keypairrax_networkrax_queuerdsroute53s3virt
commands
database
mongodb_usermysql_dbmysql_replicationmysql_usermysql_variablespostgresql_privspostgresql_userredisriak
files
internal
messaging
monitoring
airbrake_deploymentboundary_meterdatadog_eventlibrato_annotationlogentriesmonitnewrelic_deploymentpagerdutyrollbar_deployment
net_infrastructure
network
notification
|
@ -196,7 +196,7 @@ def main():
|
|||
template_parameters=dict(required=False, type='dict', default={}),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
template=dict(default=None, required=True),
|
||||
disable_rollback=dict(default=False),
|
||||
disable_rollback=dict(default=False, type='bool'),
|
||||
tags=dict(default=None)
|
||||
)
|
||||
)
|
||||
|
@ -250,7 +250,7 @@ def main():
|
|||
operation = 'CREATE'
|
||||
except Exception, err:
|
||||
error_msg = boto_exception(err)
|
||||
if 'AlreadyExistsException' in error_msg:
|
||||
if 'AlreadyExistsException' in error_msg or 'already exists' in error_msg:
|
||||
update = True
|
||||
else:
|
||||
module.fail_json(msg=error_msg)
|
||||
|
|
|
@ -20,7 +20,7 @@ DOCUMENTATION = '''
|
|||
module: digital_ocean
|
||||
short_description: Create/delete a droplet/SSH_key in DigitalOcean
|
||||
description:
|
||||
- Create/delete a droplet in DigitalOcean and optionally waits for it to be 'running', or deploy an SSH key.
|
||||
- Create/delete a droplet in DigitalOcean and optionally wait for it to be 'running', or deploy an SSH key.
|
||||
version_added: "1.3"
|
||||
options:
|
||||
command:
|
||||
|
@ -35,10 +35,10 @@ options:
|
|||
choices: ['present', 'active', 'absent', 'deleted']
|
||||
client_id:
|
||||
description:
|
||||
- Digital Ocean manager id.
|
||||
- DigitalOcean manager id.
|
||||
api_key:
|
||||
description:
|
||||
- Digital Ocean api key.
|
||||
- DigitalOcean api key.
|
||||
id:
|
||||
description:
|
||||
- Numeric, the droplet id you want to operate on.
|
||||
|
@ -47,34 +47,40 @@ options:
|
|||
- String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key.
|
||||
unique_name:
|
||||
description:
|
||||
- Bool, require unique hostnames. By default, digital ocean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence.
|
||||
- Bool, require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence.
|
||||
version_added: "1.4"
|
||||
default: "no"
|
||||
choices: [ "yes", "no" ]
|
||||
size_id:
|
||||
description:
|
||||
- Numeric, this is the id of the size you would like the droplet created at.
|
||||
- Numeric, this is the id of the size you would like the droplet created with.
|
||||
image_id:
|
||||
description:
|
||||
- Numeric, this is the id of the image you would like the droplet created with.
|
||||
region_id:
|
||||
description:
|
||||
- "Numeric, this is the id of the region you would like your server"
|
||||
- "Numeric, this is the id of the region you would like your server to be created in."
|
||||
ssh_key_ids:
|
||||
description:
|
||||
- Optional, comma separated list of ssh_key_ids that you would like to be added to the server
|
||||
- Optional, comma separated list of ssh_key_ids that you would like to be added to the server.
|
||||
virtio:
|
||||
description:
|
||||
- "Bool, turn on virtio driver in droplet for improved network and storage I/O"
|
||||
- "Bool, turn on virtio driver in droplet for improved network and storage I/O."
|
||||
version_added: "1.4"
|
||||
default: "yes"
|
||||
choices: [ "yes", "no" ]
|
||||
private_networking:
|
||||
description:
|
||||
- "Bool, add an additional, private network interface to droplet for inter-droplet communication"
|
||||
- "Bool, add an additional, private network interface to droplet for inter-droplet communication."
|
||||
version_added: "1.4"
|
||||
default: "no"
|
||||
choices: [ "yes", "no" ]
|
||||
backups_enabled:
|
||||
description:
|
||||
- Optional, Boolean, enables backups for your droplet.
|
||||
version_added: "1.6"
|
||||
default: "no"
|
||||
choices: [ "yes", "no" ]
|
||||
wait:
|
||||
description:
|
||||
- Wait for the droplet to be in state 'running' before returning. If wait is "no" an ip_address may not be returned.
|
||||
|
@ -164,11 +170,11 @@ try:
|
|||
import dopy
|
||||
from dopy.manager import DoError, DoManager
|
||||
except ImportError, e:
|
||||
print "failed=True msg='dopy >= 0.2.2 required for this module'"
|
||||
print "failed=True msg='dopy >= 0.2.3 required for this module'"
|
||||
sys.exit(1)
|
||||
|
||||
if dopy.__version__ < '0.2.2':
|
||||
print "failed=True msg='dopy >= 0.2.2 required for this module'"
|
||||
if dopy.__version__ < '0.2.3':
|
||||
print "failed=True msg='dopy >= 0.2.3 required for this module'"
|
||||
sys.exit(1)
|
||||
|
||||
class TimeoutError(DoError):
|
||||
|
@ -229,8 +235,8 @@ class Droplet(JsonfyMixIn):
|
|||
cls.manager = DoManager(client_id, api_key)
|
||||
|
||||
@classmethod
|
||||
def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False):
|
||||
json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking)
|
||||
def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False):
|
||||
json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking, backups_enabled)
|
||||
droplet = cls(json)
|
||||
return droplet
|
||||
|
||||
|
@ -333,7 +339,8 @@ def core(module):
|
|||
region_id=getkeyordie('region_id'),
|
||||
ssh_key_ids=module.params['ssh_key_ids'],
|
||||
virtio=module.params['virtio'],
|
||||
private_networking=module.params['private_networking']
|
||||
private_networking=module.params['private_networking'],
|
||||
backups_enabled=module.params['backups_enabled'],
|
||||
)
|
||||
|
||||
if droplet.is_powered_on():
|
||||
|
@ -348,7 +355,7 @@ def core(module):
|
|||
|
||||
elif state in ('absent', 'deleted'):
|
||||
# First, try to find a droplet by id.
|
||||
droplet = Droplet.find(id=getkeyordie('id'))
|
||||
droplet = Droplet.find(module.params['id'])
|
||||
|
||||
# If we couldn't find the droplet and the user is allowing unique
|
||||
# hostnames, then check to see if a droplet with the specified
|
||||
|
@ -392,8 +399,9 @@ def main():
|
|||
image_id = dict(type='int'),
|
||||
region_id = dict(type='int'),
|
||||
ssh_key_ids = dict(default=''),
|
||||
virtio = dict(type='bool', choices=BOOLEANS, default='yes'),
|
||||
private_networking = dict(type='bool', choices=BOOLEANS, default='no'),
|
||||
virtio = dict(type='bool', default='yes'),
|
||||
private_networking = dict(type='bool', default='no'),
|
||||
backups_enabled = dict(type='bool', default='no'),
|
||||
id = dict(aliases=['droplet_id'], type='int'),
|
||||
unique_name = dict(type='bool', default='no'),
|
||||
wait = dict(type='bool', default=True),
|
||||
|
|
242
cloud/digital_ocean_domain
Normal file
242
cloud/digital_ocean_domain
Normal file
|
@ -0,0 +1,242 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: digital_ocean_domain
|
||||
short_description: Create/delete a DNS record in DigitalOcean
|
||||
description:
|
||||
- Create/delete a DNS record in DigitalOcean.
|
||||
version_added: "1.6"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the target.
|
||||
default: present
|
||||
choices: ['present', 'active', 'absent', 'deleted']
|
||||
client_id:
|
||||
description:
|
||||
- Digital Ocean manager id.
|
||||
api_key:
|
||||
description:
|
||||
- Digital Ocean api key.
|
||||
id:
|
||||
description:
|
||||
- Numeric, the droplet id you want to operate on.
|
||||
name:
|
||||
description:
|
||||
- String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key, or the name of a domain.
|
||||
ip:
|
||||
description:
|
||||
- The IP address to point a domain at.
|
||||
|
||||
notes:
|
||||
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create a domain record
|
||||
|
||||
- digital_ocean_domain: >
|
||||
state=present
|
||||
name=my.digitalocean.domain
|
||||
ip=127.0.0.1
|
||||
|
||||
# Create a droplet and a corresponding domain record
|
||||
|
||||
- digital_ocean_droplet: >
|
||||
state=present
|
||||
name=test_droplet
|
||||
size_id=1
|
||||
region_id=2
|
||||
image_id=3
|
||||
register: test_droplet
|
||||
|
||||
- digital_ocean_domain: >
|
||||
state=present
|
||||
name={{ test_droplet.name }}.my.domain
|
||||
ip={{ test_droplet.ip_address }}
|
||||
'''
|
||||
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
|
||||
try:
|
||||
from dopy.manager import DoError, DoManager
|
||||
except ImportError as e:
|
||||
print "failed=True msg='dopy required for this module'"
|
||||
sys.exit(1)
|
||||
|
||||
class TimeoutError(DoError):
    """DoError raised when waiting on a DigitalOcean resource exceeds the limit.

    Carries the id of the resource that timed out so callers can report it.
    """

    def __init__(self, msg, id):
        # Hand the message to DoError, then remember which resource timed out.
        DoError.__init__(self, msg)
        self.id = id
|
||||
|
||||
class JsonfyMixIn(object):
    """Mix-in that serializes an object as a plain dict of its attributes."""

    def to_json(self):
        # The instance attribute dict already is the JSON-ready mapping.
        return vars(self)
|
||||
|
||||
class DomainRecord(JsonfyMixIn):
    """A single DNS record belonging to a DigitalOcean domain."""

    # Shared DoManager handle; installed by Domain.setup().
    manager = None

    def __init__(self, json):
        # Adopt every field of the API response as an instance attribute.
        self.__dict__.update(json)

    # Refreshing from an API payload is the same operation as construction.
    update_attr = __init__

    def update(self, data=None, record_type=None):
        """Edit this record via the API; omitted fields keep current values."""
        new_type = self.record_type if record_type is None else record_type
        new_data = self.data if data is None else data
        payload = self.manager.edit_domain_record(
            self.domain_id, self.id, new_type, new_data)
        self.__dict__.update(payload)
        return self

    def destroy(self):
        """Delete this record and return the raw API response."""
        return self.manager.destroy_domain_record(self.domain_id, self.id)
|
||||
class Domain(JsonfyMixIn):
    """A DigitalOcean DNS domain, backed by the shared DoManager client."""

    # Shared DoManager handle; installed by Domain.setup().
    manager = None

    def __init__(self, domain_json):
        # Adopt every field of the API response as an instance attribute.
        self.__dict__.update(domain_json)

    def destroy(self):
        """Delete this domain via the API."""
        self.manager.destroy_domain(self.id)

    def records(self):
        """Return this domain's DNS records as DomainRecord objects."""
        raw = self.manager.all_domain_records(self.id)
        return map(DomainRecord, raw)

    @classmethod
    def add(cls, name, ip):
        """Create a new domain whose "@" record points name at ip."""
        return cls(cls.manager.new_domain(name, ip))

    @classmethod
    def setup(cls, client_id, api_key):
        """Install the shared API client for Domain and DomainRecord."""
        cls.manager = DoManager(client_id, api_key)
        DomainRecord.manager = cls.manager

    @classmethod
    def list_all(cls):
        """Return every domain on the account."""
        return map(cls, cls.manager.all_domains())

    @classmethod
    def find(cls, name=None, id=None):
        """Find a domain by id and/or name; return False when absent.

        An id match takes priority over a name match.
        """
        if name is None and id is None:
            return False

        domains = Domain.list_all()

        if id is not None:
            for candidate in domains:
                if candidate.id == id:
                    return candidate

        if name is not None:
            for candidate in domains:
                if candidate.name == name:
                    return candidate

        return False
|
||||
|
||||
def core(module):
    """Ensure the requested domain state (present/absent) in DigitalOcean.

    Credentials come from the module parameters or, failing that, the
    DO_CLIENT_ID / DO_API_KEY environment variables.  Always terminates
    through module.exit_json()/fail_json().
    """
    def getkeyordie(k):
        # Return a required module parameter or abort the run.
        v = module.params[k]
        if v is None:
            module.fail_json(msg='Unable to load %s' % k)
        return v

    try:
        # params['client_id'] will be None even if client_id is not passed in
        client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
        api_key = module.params['api_key'] or os.environ['DO_API_KEY']
    except KeyError as e:
        module.fail_json(msg='Unable to load %s' % e.message)

    state = module.params['state']

    Domain.setup(client_id, api_key)
    # The old membership tests used bare strings -- `state in ('present')`
    # is a substring check that silently missed the documented
    # 'active'/'deleted' choices.  Use real tuples covering every choice.
    if state in ('present', 'active'):
        domain = Domain.find(id=module.params["id"])

        if not domain:
            domain = Domain.find(name=getkeyordie("name"))

        if not domain:
            domain = Domain.add(getkeyordie("name"),
                                getkeyordie("ip"))
            module.exit_json(changed=True, domain=domain.to_json())
        else:
            records = domain.records()
            at_record = None
            for record in records:
                if record.name == "@":
                    at_record = record

            # Guard against domains without an "@" record, and update the
            # "@" record itself -- the old code dereferenced a possibly-None
            # at_record and then updated whichever record the loop happened
            # to end on.
            if at_record is not None and at_record.data != getkeyordie("ip"):
                at_record.update(data=getkeyordie("ip"), record_type='A')
                module.exit_json(changed=True, domain=Domain.find(id=at_record.domain_id).to_json())

        module.exit_json(changed=False, domain=domain.to_json())

    elif state in ('absent', 'deleted'):
        domain = None
        # "id" is always a key of module.params (the argument_spec declares
        # it), so test its value instead of key membership.
        if module.params["id"] is not None:
            domain = Domain.find(id=module.params["id"])

        if not domain and module.params["name"] is not None:
            domain = Domain.find(name=module.params["name"])

        if not domain:
            module.exit_json(changed=False, msg="Domain not found.")

        event_json = domain.destroy()
        module.exit_json(changed=True, event=event_json)
|
||||
|
||||
|
||||
def main():
    """Entry point: declare the module interface and run core()."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'),
            client_id = dict(aliases=['CLIENT_ID'], no_log=True),
            api_key = dict(aliases=['API_KEY'], no_log=True),
            name = dict(type='str'),
            id = dict(aliases=['droplet_id'], type='int'),
            ip = dict(type='str'),
        ),
        required_one_of = (
            ['id', 'name'],
        ),
    )

    try:
        core(module)
    except TimeoutError as e:
        # Timeouts carry the id of the resource that never became ready.
        module.fail_json(msg=str(e), id=e.id)
    except Exception as e:
        # Ends the run with a readable message.  The old
        # `except (DoError, Exception)` tuple was redundant: DoError already
        # subclasses Exception.
        module.fail_json(msg=str(e))
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
||||
main()
|
178
cloud/digital_ocean_sshkey
Normal file
178
cloud/digital_ocean_sshkey
Normal file
|
@ -0,0 +1,178 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: digital_ocean_sshkey
|
||||
short_description: Create/delete an SSH key in DigitalOcean
|
||||
description:
|
||||
- Create/delete an SSH key.
|
||||
version_added: "1.6"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the target.
|
||||
default: present
|
||||
choices: ['present', 'absent']
|
||||
client_id:
|
||||
description:
|
||||
- Digital Ocean manager id.
|
||||
api_key:
|
||||
description:
|
||||
- Digital Ocean api key.
|
||||
id:
|
||||
description:
|
||||
- Numeric, the SSH key id you want to operate on.
|
||||
name:
|
||||
description:
|
||||
- String, this is the name of an SSH key to create or destroy.
|
||||
ssh_pub_key:
|
||||
description:
|
||||
- The public SSH key you want to add to your account.
|
||||
|
||||
notes:
|
||||
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
# Ensure a SSH key is present
|
||||
# If a key matches this name, will return the ssh key id and changed = False
|
||||
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = False
|
||||
|
||||
- digital_ocean_sshkey: >
|
||||
state=present
|
||||
name=my_ssh_key
|
||||
ssh_pub_key='ssh-rsa AAAA...'
|
||||
client_id=XXX
|
||||
api_key=XXX
|
||||
|
||||
'''
|
||||
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
|
||||
try:
|
||||
from dopy.manager import DoError, DoManager
|
||||
except ImportError as e:
|
||||
print "failed=True msg='dopy required for this module'"
|
||||
sys.exit(1)
|
||||
|
||||
class TimeoutError(DoError):
    """DoError raised when an operation on a resource takes too long.

    The offending resource id is kept so callers can include it in errors.
    """

    def __init__(self, msg, id):
        # Pass the message up to DoError and keep the resource id around.
        DoError.__init__(self, msg)
        self.id = id
|
||||
|
||||
class JsonfyMixIn(object):
    """Mix-in exposing an object's attributes as a JSON-ready dict."""

    def to_json(self):
        # vars() is equivalent to reading __dict__ directly.
        return vars(self)
|
||||
|
||||
class SSH(JsonfyMixIn):
    """An SSH public key stored on the DigitalOcean account."""

    # Shared DoManager handle; installed by SSH.setup().
    manager = None

    def __init__(self, ssh_key_json):
        # Adopt every field of the API response as an instance attribute.
        self.__dict__.update(ssh_key_json)

    # Refreshing from an API payload is the same operation as construction.
    update_attr = __init__

    def destroy(self):
        """Remove this key from the account."""
        self.manager.destroy_ssh_key(self.id)
        return True

    @classmethod
    def setup(cls, client_id, api_key):
        """Install the shared API client."""
        cls.manager = DoManager(client_id, api_key)

    @classmethod
    def find(cls, name):
        """Return the key whose name matches exactly, or False."""
        if not name:
            return False
        for candidate in cls.list_all():
            if candidate.name == name:
                return candidate
        return False

    @classmethod
    def list_all(cls):
        """Return every SSH key on the account."""
        return map(cls, cls.manager.all_ssh_keys())

    @classmethod
    def add(cls, name, key_pub):
        """Register a new public key under the given name."""
        return cls(cls.manager.new_ssh_key(name, key_pub))
|
||||
|
||||
def core(module):
    """Ensure the named SSH key is present or absent on the account.

    Credentials come from the module parameters or, failing that, the
    DO_CLIENT_ID / DO_API_KEY environment variables.  Always terminates
    through module.exit_json()/fail_json().
    """
    def getkeyordie(k):
        # Return a required module parameter or abort the run.
        v = module.params[k]
        if v is None:
            module.fail_json(msg='Unable to load %s' % k)
        return v

    try:
        # params['client_id'] will be None even if client_id is not passed in
        client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
        api_key = module.params['api_key'] or os.environ['DO_API_KEY']
    except KeyError as e:
        module.fail_json(msg='Unable to load %s' % e.message)

    state = module.params['state']

    SSH.setup(client_id, api_key)
    # NOTE(review): 'name' is demanded even though required_one_of allows
    # passing only 'id'; the id parameter is never used here -- confirm
    # whether id-based lookup was intended.
    name = getkeyordie('name')
    # The old tests used `state in ('present')` -- a substring check that
    # only worked by accident; compare for equality instead.
    if state == 'present':
        key = SSH.find(name)
        if key:
            module.exit_json(changed=False, ssh_key=key.to_json())
        key = SSH.add(name, getkeyordie('ssh_pub_key'))
        module.exit_json(changed=True, ssh_key=key.to_json())

    elif state == 'absent':
        key = SSH.find(name)
        if not key:
            module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name)
        key.destroy()
        module.exit_json(changed=True)
|
||||
|
||||
def main():
    """Entry point: declare the module interface and run core()."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(choices=['present', 'absent'], default='present'),
            client_id = dict(aliases=['CLIENT_ID'], no_log=True),
            api_key = dict(aliases=['API_KEY'], no_log=True),
            name = dict(type='str'),
            id = dict(aliases=['droplet_id'], type='int'),
            ssh_pub_key = dict(type='str'),
        ),
        required_one_of = (
            ['id', 'name'],
        ),
    )

    try:
        core(module)
    except TimeoutError as e:
        # Timeouts carry the id of the resource that never became ready.
        module.fail_json(msg=str(e), id=e.id)
    except Exception as e:
        # Ends the run with a readable message.  The old
        # `except (DoError, Exception)` tuple was redundant: DoError already
        # subclasses Exception.
        module.fail_json(msg=str(e))
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
||||
main()
|
120
cloud/docker
120
cloud/docker
|
@ -148,7 +148,7 @@ options:
|
|||
- Set the state of the container
|
||||
required: false
|
||||
default: present
|
||||
choices: [ "present", "stopped", "absent", "killed", "restarted" ]
|
||||
choices: [ "present", "running", "stopped", "absent", "killed", "restarted" ]
|
||||
aliases: []
|
||||
privileged:
|
||||
description:
|
||||
|
@ -169,6 +169,20 @@ options:
|
|||
default: null
|
||||
aliases: []
|
||||
version_added: "1.5"
|
||||
stdin_open:
|
||||
description:
|
||||
- Keep stdin open
|
||||
required: false
|
||||
default: false
|
||||
aliases: []
|
||||
version_added: "1.6"
|
||||
tty:
|
||||
description:
|
||||
- Allocate a pseudo-tty
|
||||
required: false
|
||||
default: false
|
||||
aliases: []
|
||||
version_added: "1.6"
|
||||
author: Cove Schneider, Joshua Conner, Pavel Antonov
|
||||
requirements: [ "docker-py >= 0.3.0" ]
|
||||
'''
|
||||
|
@ -287,6 +301,7 @@ import sys
|
|||
from urlparse import urlparse
|
||||
try:
|
||||
import docker.client
|
||||
import docker.utils
|
||||
from requests.exceptions import *
|
||||
except ImportError, e:
|
||||
HAS_DOCKER_PY = False
|
||||
|
@ -331,7 +346,7 @@ class DockerManager:
|
|||
if self.module.params.get('volumes'):
|
||||
self.binds = {}
|
||||
self.volumes = {}
|
||||
vols = self.parse_list_from_param('volumes')
|
||||
vols = self.module.params.get('volumes')
|
||||
for vol in vols:
|
||||
parts = vol.split(":")
|
||||
# host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container)
|
||||
|
@ -345,46 +360,32 @@ class DockerManager:
|
|||
self.lxc_conf = None
|
||||
if self.module.params.get('lxc_conf'):
|
||||
self.lxc_conf = []
|
||||
options = self.parse_list_from_param('lxc_conf')
|
||||
options = self.module.params.get('lxc_conf')
|
||||
for option in options:
|
||||
parts = option.split(':')
|
||||
self.lxc_conf.append({"Key": parts[0], "Value": parts[1]})
|
||||
|
||||
self.exposed_ports = None
|
||||
if self.module.params.get('expose'):
|
||||
expose = self.parse_list_from_param('expose')
|
||||
self.exposed_ports = self.get_exposed_ports(expose)
|
||||
self.exposed_ports = self.get_exposed_ports(self.module.params.get('expose'))
|
||||
|
||||
self.port_bindings = None
|
||||
if self.module.params.get('ports'):
|
||||
ports = self.parse_list_from_param('ports')
|
||||
self.port_bindings = self.get_port_bindings(ports)
|
||||
self.port_bindings = self.get_port_bindings(self.module.params.get('ports'))
|
||||
|
||||
self.links = None
|
||||
if self.module.params.get('links'):
|
||||
links = self.parse_list_from_param('links')
|
||||
self.links = dict(map(lambda x: x.split(':'), links))
|
||||
self.links = dict(map(lambda x: x.split(':'), self.module.params.get('links')))
|
||||
|
||||
self.env = None
|
||||
if self.module.params.get('env'):
|
||||
env = self.parse_list_from_param('env')
|
||||
self.env = dict(map(lambda x: x.split("="), env))
|
||||
self.env = dict(map(lambda x: x.split("="), self.module.params.get('env')))
|
||||
|
||||
# connect to docker server
|
||||
docker_url = urlparse(module.params.get('docker_url'))
|
||||
self.client = docker.Client(base_url=docker_url.geturl())
|
||||
|
||||
|
||||
def parse_list_from_param(self, param_name, delimiter=','):
|
||||
"""
|
||||
Get a list from a module parameter, whether it's specified as a delimiter-separated string or is already in list form.
|
||||
"""
|
||||
param_list = self.module.params.get(param_name)
|
||||
if not isinstance(param_list, list):
|
||||
param_list = param_list.split(delimiter)
|
||||
return param_list
|
||||
|
||||
|
||||
def get_exposed_ports(self, expose_list):
|
||||
"""
|
||||
Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax.
|
||||
|
@ -409,7 +410,9 @@ class DockerManager:
|
|||
"""
|
||||
binds = {}
|
||||
for port in ports:
|
||||
parts = port.split(':')
|
||||
# ports could potentially be an array like [80, 443], so we make sure they're strings
|
||||
# before splitting
|
||||
parts = str(port).split(':')
|
||||
container_port = parts[-1]
|
||||
if '/' not in container_port:
|
||||
container_port = int(parts[-1])
|
||||
|
@ -522,15 +525,19 @@ class DockerManager:
|
|||
'command': self.module.params.get('command'),
|
||||
'ports': self.exposed_ports,
|
||||
'volumes': self.volumes,
|
||||
'volumes_from': self.module.params.get('volumes_from'),
|
||||
'mem_limit': _human_to_bytes(self.module.params.get('memory_limit')),
|
||||
'environment': self.env,
|
||||
'dns': self.module.params.get('dns'),
|
||||
'hostname': self.module.params.get('hostname'),
|
||||
'detach': self.module.params.get('detach'),
|
||||
'name': self.module.params.get('name'),
|
||||
'stdin_open': self.module.params.get('stdin_open'),
|
||||
'tty': self.module.params.get('tty'),
|
||||
}
|
||||
|
||||
if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) < 0:
|
||||
params['dns'] = self.module.params.get('dns')
|
||||
params['volumes_from'] = self.module.params.get('volumes_from')
|
||||
|
||||
def do_create(count, params):
|
||||
results = []
|
||||
for _ in range(count):
|
||||
|
@ -558,6 +565,11 @@ class DockerManager:
|
|||
'privileged': self.module.params.get('privileged'),
|
||||
'links': self.links,
|
||||
}
|
||||
|
||||
if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) >= 0:
|
||||
params['dns'] = self.module.params.get('dns')
|
||||
params['volumes_from'] = self.module.params.get('volumes_from')
|
||||
|
||||
for i in containers:
|
||||
self.client.start(i['Id'], **params)
|
||||
self.increment_counter('started')
|
||||
|
@ -616,12 +628,12 @@ def main():
|
|||
count = dict(default=1),
|
||||
image = dict(required=True),
|
||||
command = dict(required=False, default=None),
|
||||
expose = dict(required=False, default=None),
|
||||
ports = dict(required=False, default=None),
|
||||
expose = dict(required=False, default=None, type='list'),
|
||||
ports = dict(required=False, default=None, type='list'),
|
||||
publish_all_ports = dict(default=False, type='bool'),
|
||||
volumes = dict(default=None),
|
||||
volumes = dict(default=None, type='list'),
|
||||
volumes_from = dict(default=None),
|
||||
links = dict(default=None),
|
||||
links = dict(default=None, type='list'),
|
||||
memory_limit = dict(default=0),
|
||||
memory_swap = dict(default=0),
|
||||
docker_url = dict(default='unix://var/run/docker.sock'),
|
||||
|
@ -629,13 +641,15 @@ def main():
|
|||
password = dict(),
|
||||
email = dict(),
|
||||
hostname = dict(default=None),
|
||||
env = dict(),
|
||||
env = dict(type='list'),
|
||||
dns = dict(),
|
||||
detach = dict(default=True, type='bool'),
|
||||
state = dict(default='present', choices=['absent', 'present', 'stopped', 'killed', 'restarted']),
|
||||
state = dict(default='running', choices=['absent', 'present', 'running', 'stopped', 'killed', 'restarted']),
|
||||
debug = dict(default=False, type='bool'),
|
||||
privileged = dict(default=False, type='bool'),
|
||||
lxc_conf = dict(default=None),
|
||||
stdin_open = dict(default=False, type='bool'),
|
||||
tty = dict(default=False, type='bool'),
|
||||
lxc_conf = dict(default=None, type='list'),
|
||||
name = dict(default=None)
|
||||
)
|
||||
)
|
||||
|
@ -662,25 +676,35 @@ def main():
|
|||
changed = False
|
||||
|
||||
# start/stop containers
|
||||
if state == "present":
|
||||
|
||||
# make sure a container with `name` is running
|
||||
if name and "/" + name not in map(lambda x: x.get('Name'), running_containers):
|
||||
if state in [ "running", "present" ]:
|
||||
|
||||
# make sure a container with `name` exists, if not create and start it
|
||||
if name and "/" + name not in map(lambda x: x.get('Name'), deployed_containers):
|
||||
containers = manager.create_containers(1)
|
||||
manager.start_containers(containers)
|
||||
if state == "present": #otherwise it get (re)started later anyways..
|
||||
manager.start_containers(containers)
|
||||
running_containers = manager.get_running_containers()
|
||||
deployed_containers = manager.get_deployed_containers()
|
||||
|
||||
# start more containers if we don't have enough
|
||||
elif delta > 0:
|
||||
containers = manager.create_containers(delta)
|
||||
manager.start_containers(containers)
|
||||
|
||||
# stop containers if we have too many
|
||||
elif delta < 0:
|
||||
containers_to_stop = running_containers[0:abs(delta)]
|
||||
containers = manager.stop_containers(containers_to_stop)
|
||||
manager.remove_containers(containers_to_stop)
|
||||
|
||||
facts = manager.get_running_containers()
|
||||
if state == "running":
|
||||
# make sure a container with `name` is running
|
||||
if name and "/" + name not in map(lambda x: x.get('Name'), running_containers):
|
||||
manager.start_containers(deployed_containers)
|
||||
|
||||
# start more containers if we don't have enough
|
||||
elif delta > 0:
|
||||
containers = manager.create_containers(delta)
|
||||
manager.start_containers(containers)
|
||||
|
||||
# stop containers if we have too many
|
||||
elif delta < 0:
|
||||
containers_to_stop = running_containers[0:abs(delta)]
|
||||
containers = manager.stop_containers(containers_to_stop)
|
||||
manager.remove_containers(containers_to_stop)
|
||||
|
||||
facts = manager.get_running_containers()
|
||||
else:
|
||||
acts = manager.get_deployed_containers()
|
||||
|
||||
# stop and remove containers
|
||||
elif state == "absent":
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#!/usr/bin/env python
|
||||
#!/usr/bin/python
|
||||
#
|
||||
|
||||
# (c) 2014, Pavel Antonov <antonov@adwz.ru>
|
||||
|
@ -137,6 +137,9 @@ class DockerImageManager:
|
|||
self.changed = True
|
||||
|
||||
for chunk in stream:
|
||||
if not chunk:
|
||||
continue
|
||||
|
||||
chunk_json = json.loads(chunk)
|
||||
|
||||
if 'error' in chunk_json:
|
||||
|
|
220
cloud/ec2
220
cloud/ec2
|
@ -67,6 +67,13 @@ options:
|
|||
required: true
|
||||
default: null
|
||||
aliases: []
|
||||
spot_price:
|
||||
version_added: "1.5"
|
||||
description:
|
||||
- Maximum spot price to bid, If not set a regular on-demand instance is requested. A spot request is made with this maximum bid. When it is filled, the instance is started.
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
image:
|
||||
description:
|
||||
- I(emi) (or I(ami)) to use for the instance
|
||||
|
@ -97,24 +104,12 @@ options:
|
|||
- how long before wait gives up, in seconds
|
||||
default: 300
|
||||
aliases: []
|
||||
ec2_url:
|
||||
spot_wait_timeout:
|
||||
version_added: "1.5"
|
||||
description:
|
||||
- Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used
|
||||
required: false
|
||||
default: null
|
||||
- how long to wait for the spot instance request to be fulfilled
|
||||
default: 600
|
||||
aliases: []
|
||||
aws_secret_key:
|
||||
description:
|
||||
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ 'ec2_secret_key', 'secret_key' ]
|
||||
aws_access_key:
|
||||
description:
|
||||
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ 'ec2_access_key', 'access_key' ]
|
||||
count:
|
||||
description:
|
||||
- number of instances to launch
|
||||
|
@ -157,7 +152,7 @@ options:
|
|||
default: null
|
||||
aliases: []
|
||||
assign_public_ip:
|
||||
version_added: "1.4"
|
||||
version_added: "1.5"
|
||||
description:
|
||||
- when provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+
|
||||
required: false
|
||||
|
@ -184,6 +179,12 @@ options:
|
|||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
source_dest_check:
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers)
|
||||
required: false
|
||||
default: true
|
||||
state:
|
||||
version_added: "1.3"
|
||||
description:
|
||||
|
@ -198,6 +199,12 @@ options:
|
|||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
ebs_optimized:
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
|
||||
required: false
|
||||
default: false
|
||||
exact_count:
|
||||
version_added: "1.5"
|
||||
description:
|
||||
|
@ -212,17 +219,9 @@ options:
|
|||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
validate_certs:
|
||||
description:
|
||||
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
|
||||
required: false
|
||||
default: "yes"
|
||||
choices: ["yes", "no"]
|
||||
aliases: []
|
||||
version_added: "1.5"
|
||||
|
||||
requirements: [ "boto" ]
|
||||
author: Seth Vidal, Tim Gerla, Lester Wade
|
||||
extends_documentation_fragment: aws
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -253,7 +252,7 @@ EXAMPLES = '''
|
|||
db: postgres
|
||||
monitoring: yes
|
||||
|
||||
# Single instance with additional IOPS volume from snapshot
|
||||
# Single instance with additional IOPS volume from snapshot and volume delete on termination
|
||||
local_action:
|
||||
module: ec2
|
||||
key_name: mykey
|
||||
|
@ -268,6 +267,7 @@ local_action:
|
|||
device_type: io1
|
||||
iops: 1000
|
||||
volume_size: 100
|
||||
delete_on_termination: true
|
||||
monitoring: yes
|
||||
|
||||
# Multiple groups example
|
||||
|
@ -311,6 +311,19 @@ local_action:
|
|||
vpc_subnet_id: subnet-29e63245
|
||||
assign_public_ip: yes
|
||||
|
||||
# Spot instance example
|
||||
- local_action:
|
||||
module: ec2
|
||||
spot_price: 0.24
|
||||
spot_wait_timeout: 600
|
||||
keypair: mykey
|
||||
group_id: sg-1dc53f72
|
||||
instance_type: m1.small
|
||||
image: ami-6e649707
|
||||
wait: yes
|
||||
vpc_subnet_id: subnet-29e63245
|
||||
assign_public_ip: yes
|
||||
|
||||
# Launch instances, runs some tasks
|
||||
# and then terminate them
|
||||
|
||||
|
@ -557,7 +570,8 @@ def get_instance_info(inst):
|
|||
'root_device_type': inst.root_device_type,
|
||||
'root_device_name': inst.root_device_name,
|
||||
'state': inst.state,
|
||||
'hypervisor': inst.hypervisor}
|
||||
'hypervisor': inst.hypervisor,
|
||||
'ebs_optimized': inst.ebs_optimized}
|
||||
try:
|
||||
instance_info['virtualization_type'] = getattr(inst,'virtualization_type')
|
||||
except AttributeError:
|
||||
|
@ -620,6 +634,17 @@ def create_block_device(module, ec2, volume):
|
|||
delete_on_termination=volume.get('delete_on_termination', False),
|
||||
iops=volume.get('iops'))
|
||||
|
||||
def boto_supports_param_in_spot_request(ec2, param):
|
||||
"""
|
||||
Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
|
||||
|
||||
ec2: authenticated ec2 connection object
|
||||
|
||||
Returns:
|
||||
True if boto library has the named param as an argument on the request_spot_instances method, else False
|
||||
"""
|
||||
method = getattr(ec2, 'request_spot_instances')
|
||||
return param in method.func_code.co_varnames
|
||||
|
||||
def enforce_count(module, ec2):
|
||||
|
||||
|
@ -644,7 +669,6 @@ def enforce_count(module, ec2):
|
|||
|
||||
for inst in instance_dict_array:
|
||||
instances.append(inst)
|
||||
|
||||
elif len(instances) > exact_count:
|
||||
changed = True
|
||||
to_remove = len(instances) - exact_count
|
||||
|
@ -690,6 +714,7 @@ def create_instances(module, ec2, override_count=None):
|
|||
group_id = module.params.get('group_id')
|
||||
zone = module.params.get('zone')
|
||||
instance_type = module.params.get('instance_type')
|
||||
spot_price = module.params.get('spot_price')
|
||||
image = module.params.get('image')
|
||||
if override_count:
|
||||
count = override_count
|
||||
|
@ -700,6 +725,7 @@ def create_instances(module, ec2, override_count=None):
|
|||
ramdisk = module.params.get('ramdisk')
|
||||
wait = module.params.get('wait')
|
||||
wait_timeout = int(module.params.get('wait_timeout'))
|
||||
spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
|
||||
placement_group = module.params.get('placement_group')
|
||||
user_data = module.params.get('user_data')
|
||||
instance_tags = module.params.get('instance_tags')
|
||||
|
@ -708,8 +734,10 @@ def create_instances(module, ec2, override_count=None):
|
|||
private_ip = module.params.get('private_ip')
|
||||
instance_profile_name = module.params.get('instance_profile_name')
|
||||
volumes = module.params.get('volumes')
|
||||
ebs_optimized = module.params.get('ebs_optimized')
|
||||
exact_count = module.params.get('exact_count')
|
||||
count_tag = module.params.get('count_tag')
|
||||
source_dest_check = module.boolean(module.params.get('source_dest_check'))
|
||||
|
||||
# group_id and group_name are exclusive of each other
|
||||
if group_id and group_name:
|
||||
|
@ -760,18 +788,16 @@ def create_instances(module, ec2, override_count=None):
|
|||
try:
|
||||
params = {'image_id': image,
|
||||
'key_name': key_name,
|
||||
'client_token': id,
|
||||
'min_count': count_remaining,
|
||||
'max_count': count_remaining,
|
||||
'monitoring_enabled': monitoring,
|
||||
'placement': zone,
|
||||
'placement_group': placement_group,
|
||||
'instance_type': instance_type,
|
||||
'kernel_id': kernel,
|
||||
'ramdisk_id': ramdisk,
|
||||
'private_ip_address': private_ip,
|
||||
'user_data': user_data}
|
||||
|
||||
if ebs_optimized:
|
||||
params['ebs_optimized'] = ebs_optimized
|
||||
|
||||
if boto_supports_profile_name_arg(ec2):
|
||||
params['instance_profile_name'] = instance_profile_name
|
||||
else:
|
||||
|
@ -788,13 +814,19 @@ def create_instances(module, ec2, override_count=None):
|
|||
msg="assign_public_ip only available with vpc_subnet_id")
|
||||
|
||||
else:
|
||||
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
|
||||
subnet_id=vpc_subnet_id,
|
||||
groups=group_id,
|
||||
associate_public_ip_address=assign_public_ip)
|
||||
if private_ip:
|
||||
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
|
||||
subnet_id=vpc_subnet_id,
|
||||
private_ip_address=private_ip,
|
||||
groups=group_id,
|
||||
associate_public_ip_address=assign_public_ip)
|
||||
else:
|
||||
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
|
||||
subnet_id=vpc_subnet_id,
|
||||
groups=group_id,
|
||||
associate_public_ip_address=assign_public_ip)
|
||||
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
|
||||
params['network_interfaces'] = interfaces
|
||||
|
||||
params['network_interfaces'] = interfaces
|
||||
else:
|
||||
params['subnet_id'] = vpc_subnet_id
|
||||
if vpc_subnet_id:
|
||||
|
@ -814,38 +846,88 @@ def create_instances(module, ec2, override_count=None):
|
|||
|
||||
params['block_device_map'] = bdm
|
||||
|
||||
res = ec2.run_instances(**params)
|
||||
except boto.exception.BotoServerError, e:
|
||||
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
|
||||
|
||||
instids = [ i.id for i in res.instances ]
|
||||
while True:
|
||||
try:
|
||||
res.connection.get_all_instances(instids)
|
||||
break
|
||||
except boto.exception.EC2ResponseError, e:
|
||||
if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
|
||||
# there's a race between start and get an instance
|
||||
continue
|
||||
# check to see if we're using spot pricing first before starting instances
|
||||
if not spot_price:
|
||||
if assign_public_ip and private_ip:
|
||||
params.update(dict(
|
||||
min_count = count_remaining,
|
||||
max_count = count_remaining,
|
||||
client_token = id,
|
||||
placement_group = placement_group,
|
||||
))
|
||||
else:
|
||||
module.fail_json(msg = str(e))
|
||||
params.update(dict(
|
||||
min_count = count_remaining,
|
||||
max_count = count_remaining,
|
||||
client_token = id,
|
||||
placement_group = placement_group,
|
||||
private_ip_address = private_ip,
|
||||
))
|
||||
|
||||
res = ec2.run_instances(**params)
|
||||
instids = [ i.id for i in res.instances ]
|
||||
while True:
|
||||
try:
|
||||
ec2.get_all_instances(instids)
|
||||
break
|
||||
except boto.exception.EC2ResponseError as e:
|
||||
if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
|
||||
# there's a race between start and get an instance
|
||||
continue
|
||||
else:
|
||||
module.fail_json(msg = str(e))
|
||||
else:
|
||||
if private_ip:
|
||||
module.fail_json(
|
||||
msg='private_ip only available with on-demand (non-spot) instances')
|
||||
if boto_supports_param_in_spot_request(ec2, placement_group):
|
||||
params['placement_group'] = placement_group
|
||||
elif placement_group :
|
||||
module.fail_json(
|
||||
msg="placement_group parameter requires Boto version 2.3.0 or higher.")
|
||||
|
||||
params.update(dict(
|
||||
count = count_remaining,
|
||||
))
|
||||
res = ec2.request_spot_instances(spot_price, **params)
|
||||
|
||||
# Now we have to do the intermediate waiting
|
||||
if wait:
|
||||
spot_req_inst_ids = dict()
|
||||
spot_wait_timeout = time.time() + spot_wait_timeout
|
||||
while spot_wait_timeout > time.time():
|
||||
reqs = ec2.get_all_spot_instance_requests()
|
||||
for sirb in res:
|
||||
if sirb.id in spot_req_inst_ids:
|
||||
continue
|
||||
for sir in reqs:
|
||||
if sir.id == sirb.id and sir.instance_id is not None:
|
||||
spot_req_inst_ids[sirb.id] = sir.instance_id
|
||||
if len(spot_req_inst_ids) < count:
|
||||
time.sleep(5)
|
||||
else:
|
||||
break
|
||||
if spot_wait_timeout <= time.time():
|
||||
module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime())
|
||||
instids = spot_req_inst_ids.values()
|
||||
except boto.exception.BotoServerError, e:
|
||||
module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message))
|
||||
|
||||
if instance_tags:
|
||||
try:
|
||||
ec2.create_tags(instids, instance_tags)
|
||||
except boto.exception.EC2ResponseError, e:
|
||||
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
|
||||
module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, e.error_message))
|
||||
|
||||
# wait here until the instances are up
|
||||
this_res = []
|
||||
num_running = 0
|
||||
wait_timeout = time.time() + wait_timeout
|
||||
while wait_timeout > time.time() and num_running < len(instids):
|
||||
res_list = res.connection.get_all_instances(instids)
|
||||
if len(res_list) > 0:
|
||||
this_res = res_list[0]
|
||||
num_running = len([ i for i in this_res.instances if i.state=='running' ])
|
||||
else:
|
||||
res_list = ec2.get_all_instances(instids)
|
||||
num_running = 0
|
||||
for res in res_list:
|
||||
num_running += len([ i for i in res.instances if i.state=='running' ])
|
||||
if len(res_list) <= 0:
|
||||
# got a bad response of some sort, possibly due to
|
||||
# stale/cached data. Wait a second and then try again
|
||||
time.sleep(1)
|
||||
|
@ -859,8 +941,14 @@ def create_instances(module, ec2, override_count=None):
|
|||
# waiting took too long
|
||||
module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())
|
||||
|
||||
for inst in this_res.instances:
|
||||
running_instances.append(inst)
|
||||
#We do this after the loop ends so that we end up with one list
|
||||
for res in res_list:
|
||||
running_instances.extend(res.instances)
|
||||
|
||||
# Enabled by default by Amazon
|
||||
if not source_dest_check:
|
||||
for inst in res.instances:
|
||||
inst.modify_attribute('sourceDestCheck', False)
|
||||
|
||||
instance_dict_array = []
|
||||
created_instance_ids = []
|
||||
|
@ -1020,13 +1108,15 @@ def main():
|
|||
group_id = dict(type='list'),
|
||||
zone = dict(aliases=['aws_zone', 'ec2_zone']),
|
||||
instance_type = dict(aliases=['type']),
|
||||
spot_price = dict(),
|
||||
image = dict(),
|
||||
kernel = dict(),
|
||||
count = dict(default='1'),
|
||||
count = dict(type='int', default='1'),
|
||||
monitoring = dict(type='bool', default=False),
|
||||
ramdisk = dict(),
|
||||
wait = dict(type='bool', default=False),
|
||||
wait_timeout = dict(default=300),
|
||||
spot_wait_timeout = dict(default=600),
|
||||
placement_group = dict(),
|
||||
user_data = dict(),
|
||||
instance_tags = dict(type='dict'),
|
||||
|
@ -1035,10 +1125,12 @@ def main():
|
|||
private_ip = dict(),
|
||||
instance_profile_name = dict(),
|
||||
instance_ids = dict(type='list'),
|
||||
source_dest_check = dict(type='bool', default=True),
|
||||
state = dict(default='present'),
|
||||
exact_count = dict(type='int', default=None),
|
||||
count_tag = dict(),
|
||||
volumes = dict(type='list'),
|
||||
ebs_optimized = dict(),
|
||||
)
|
||||
)
|
||||
|
||||
|
|
|
@ -22,24 +22,6 @@ short_description: create or destroy an image in ec2, return imageid
|
|||
description:
|
||||
- Creates or deletes ec2 images. This module has a dependency on python-boto >= 2.5
|
||||
options:
|
||||
ec2_url:
|
||||
description:
|
||||
- Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
aws_secret_key:
|
||||
description:
|
||||
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ 'ec2_secret_key', 'secret_key' ]
|
||||
aws_access_key:
|
||||
description:
|
||||
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
|
||||
required: false
|
||||
default: null
|
||||
aliases: ['ec2_access_key', 'access_key' ]
|
||||
instance_id:
|
||||
description:
|
||||
- instance id of the image to create
|
||||
|
@ -101,17 +83,9 @@ options:
|
|||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
validate_certs:
|
||||
description:
|
||||
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
|
||||
required: false
|
||||
default: "yes"
|
||||
choices: ["yes", "no"]
|
||||
aliases: []
|
||||
version_added: "1.5"
|
||||
|
||||
requirements: [ "boto" ]
|
||||
author: Evan Duffield <eduffield@iacquire.com>
|
||||
extends_documentation_fragment: aws
|
||||
'''
|
||||
|
||||
# Thank you to iAcquire for sponsoring development of this module.
|
||||
|
|
196
cloud/ec2_ami_search
Normal file
196
cloud/ec2_ami_search
Normal file
|
@ -0,0 +1,196 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2013, Nimbis Services
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_ami_search
|
||||
short_description: Retrieve AWS AMI for a given operating system.
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- Look up the most recent AMI on AWS for a given operating system.
|
||||
- Returns C(ami), C(aki), C(ari), C(serial), C(tag)
|
||||
- If there is no AKI or ARI associated with an image, these will be C(null).
|
||||
- Only supports images from cloud-images.ubuntu.com
|
||||
- 'Example output: C({"ami": "ami-69f5a900", "changed": false, "aki": "aki-88aa75e1", "tag": "release", "ari": null, "serial": "20131024"})'
|
||||
version_added: "1.6"
|
||||
options:
|
||||
distro:
|
||||
description: Linux distribution (e.g., C(ubuntu))
|
||||
required: true
|
||||
choices: ["ubuntu"]
|
||||
release:
|
||||
description: short name of the release (e.g., C(precise))
|
||||
required: true
|
||||
stream:
|
||||
description: Type of release.
|
||||
required: false
|
||||
default: "server"
|
||||
choices: ["server", "desktop"]
|
||||
store:
|
||||
description: Back-end store for instance
|
||||
required: false
|
||||
default: "ebs"
|
||||
choices: ["ebs", "instance-store"]
|
||||
arch:
|
||||
description: CPU architecture
|
||||
required: false
|
||||
default: "amd64"
|
||||
choices: ["i386", "amd64"]
|
||||
region:
|
||||
description: EC2 region
|
||||
required: false
|
||||
default: us-east-1
|
||||
choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2",
|
||||
"eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2"]
|
||||
virt:
|
||||
description: virutalization type
|
||||
required: false
|
||||
default: paravirtual
|
||||
choices: ["paravirtual", "hvm"]
|
||||
|
||||
author: Lorin Hochstein
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Launch an Ubuntu 12.04 (Precise Pangolin) EC2 instance
|
||||
hosts: 127.0.0.1
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Get the Ubuntu precise AMI
|
||||
ec2_ami_search: distro=ubuntu release=precise region=us-west-1 store=instance-store
|
||||
register: ubuntu_image
|
||||
- name: Start the EC2 instance
|
||||
ec2: image={{ ubuntu_image.ami }} instance_type=m1.small key_name=mykey
|
||||
'''
|
||||
|
||||
import csv
|
||||
import json
|
||||
import urllib2
|
||||
import urlparse
|
||||
|
||||
SUPPORTED_DISTROS = ['ubuntu']
|
||||
|
||||
AWS_REGIONS = ['ap-northeast-1',
|
||||
'ap-southeast-1',
|
||||
'ap-southeast-2',
|
||||
'eu-west-1',
|
||||
'sa-east-1',
|
||||
'us-east-1',
|
||||
'us-west-1',
|
||||
'us-west-2']
|
||||
|
||||
|
||||
def get_url(module, url):
|
||||
""" Get url and return response """
|
||||
try:
|
||||
r = urllib2.urlopen(url)
|
||||
except (urllib2.HTTPError, urllib2.URLError), e:
|
||||
code = getattr(e, 'code', -1)
|
||||
module.fail_json(msg="Request failed: %s" % str(e), status_code=code)
|
||||
return r
|
||||
|
||||
|
||||
def ubuntu(module):
|
||||
""" Get the ami for ubuntu """
|
||||
|
||||
release = module.params['release']
|
||||
stream = module.params['stream']
|
||||
store = module.params['store']
|
||||
arch = module.params['arch']
|
||||
region = module.params['region']
|
||||
virt = module.params['virt']
|
||||
|
||||
url = get_ubuntu_url(release, stream)
|
||||
|
||||
req = get_url(module, url)
|
||||
reader = csv.reader(req, delimiter='\t')
|
||||
try:
|
||||
ami, aki, ari, tag, serial = lookup_ubuntu_ami(reader, release, stream,
|
||||
store, arch, region, virt)
|
||||
module.exit_json(changed=False, ami=ami, aki=aki, ari=ari, tag=tag,
|
||||
serial=serial)
|
||||
except KeyError:
|
||||
module.fail_json(msg="No matching AMI found")
|
||||
|
||||
|
||||
def lookup_ubuntu_ami(table, release, stream, store, arch, region, virt):
|
||||
""" Look up the Ubuntu AMI that matches query given a table of AMIs
|
||||
|
||||
table: an iterable that returns a row of
|
||||
(release, stream, tag, serial, region, ami, aki, ari, virt)
|
||||
release: ubuntu release name
|
||||
stream: 'server' or 'desktop'
|
||||
store: 'ebs' or 'instance-store'
|
||||
arch: 'i386' or 'amd64'
|
||||
region: EC2 region
|
||||
virt: 'paravirtual' or 'hvm'
|
||||
|
||||
Returns (ami, aki, ari, tag, serial)"""
|
||||
expected = (release, stream, store, arch, region, virt)
|
||||
|
||||
for row in table:
|
||||
(actual_release, actual_stream, tag, serial,
|
||||
actual_store, actual_arch, actual_region, ami, aki, ari,
|
||||
actual_virt) = row
|
||||
actual = (actual_release, actual_stream, actual_store, actual_arch,
|
||||
actual_region, actual_virt)
|
||||
if actual == expected:
|
||||
# aki and ari are sometimes blank
|
||||
if aki == '':
|
||||
aki = None
|
||||
if ari == '':
|
||||
ari = None
|
||||
return (ami, aki, ari, tag, serial)
|
||||
|
||||
raise KeyError()
|
||||
|
||||
|
||||
def get_ubuntu_url(release, stream):
|
||||
url = "https://cloud-images.ubuntu.com/query/%s/%s/released.current.txt"
|
||||
return url % (release, stream)
|
||||
|
||||
|
||||
def main():
|
||||
arg_spec = dict(
|
||||
distro=dict(required=True, choices=SUPPORTED_DISTROS),
|
||||
release=dict(required=True),
|
||||
stream=dict(required=False, default='server',
|
||||
choices=['desktop', 'server']),
|
||||
store=dict(required=False, default='ebs',
|
||||
choices=['ebs', 'instance-store']),
|
||||
arch=dict(required=False, default='amd64',
|
||||
choices=['i386', 'amd64']),
|
||||
region=dict(required=False, default='us-east-1', choices=AWS_REGIONS),
|
||||
virt=dict(required=False, default='paravirtual',
|
||||
choices=['paravirtual', 'hvm'])
|
||||
)
|
||||
module = AnsibleModule(argument_spec=arg_spec)
|
||||
distro = module.params['distro']
|
||||
|
||||
if distro == 'ubuntu':
|
||||
ubuntu(module)
|
||||
else:
|
||||
module.fail_json(msg="Unsupported distro: %s" % distro)
|
||||
|
||||
|
||||
|
||||
# this is magic, see lib/ansible/module_common.py
|
||||
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
219
cloud/ec2_asg
Normal file
219
cloud/ec2_asg
Normal file
|
@ -0,0 +1,219 @@
|
|||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: ec2_asg
|
||||
short_description: Create or delete AWS Autoscaling Groups
|
||||
description:
|
||||
- Can create or delete AWS Autoscaling Groups
|
||||
- Works with the ec2_lc module to manage Launch Configurations
|
||||
version_added: "1.6"
|
||||
author: Gareth Rushgrove
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- register or deregister the instance
|
||||
required: true
|
||||
choices: ['present', 'absent']
|
||||
name:
|
||||
description:
|
||||
- Unique name for group to be created or deleted
|
||||
required: true
|
||||
load_balancers:
|
||||
description:
|
||||
- List of ELB names to use for the group
|
||||
required: false
|
||||
availability_zones:
|
||||
description:
|
||||
- List of availability zone names in which to create the group.
|
||||
required: false
|
||||
launch_config_name:
|
||||
description:
|
||||
- Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
|
||||
required: false
|
||||
min_size:
|
||||
description:
|
||||
- Minimum number of instances in group
|
||||
required: false
|
||||
max_size:
|
||||
description:
|
||||
- Maximum number of instances in group
|
||||
required: false
|
||||
desired_capacity:
|
||||
description:
|
||||
- Desired number of instances in group
|
||||
required: false
|
||||
region:
|
||||
description:
|
||||
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
|
||||
required: false
|
||||
aliases: ['aws_region', 'ec2_region']
|
||||
vpc_zone_identifier:
|
||||
description:
|
||||
- List of VPC subnets to use
|
||||
required: false
|
||||
default: None
|
||||
extends_documentation_fragment: aws
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
- ec2_asg:
|
||||
name: special
|
||||
load_balancers: 'lb1,lb2'
|
||||
availability_zones: 'eu-west-1a,eu-west-1b'
|
||||
launch_config_name: 'lc-1'
|
||||
min_size: 1
|
||||
max_size: 10
|
||||
desired_capacity: 5
|
||||
vpc_zone_identifier: 'subnet-abcd1234,subnet-1a2b3c4d'
|
||||
'''
|
||||
|
||||
import sys
|
||||
import time
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
try:
|
||||
import boto.ec2.autoscale
|
||||
from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup
|
||||
from boto.exception import BotoServerError
|
||||
except ImportError:
|
||||
print "failed=True msg='boto required for this module'"
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def enforce_required_arguments(module):
|
||||
''' As many arguments are not required for autoscale group deletion
|
||||
they cannot be mandatory arguments for the module, so we enforce
|
||||
them here '''
|
||||
missing_args = []
|
||||
for arg in ('min_size', 'max_size', 'launch_config_name', 'availability_zones'):
|
||||
if module.params[arg] is None:
|
||||
missing_args.append(arg)
|
||||
if missing_args:
|
||||
module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args))
|
||||
|
||||
|
||||
def create_autoscaling_group(connection, module):
|
||||
enforce_required_arguments(module)
|
||||
|
||||
group_name = module.params.get('name')
|
||||
load_balancers = module.params['load_balancers']
|
||||
availability_zones = module.params['availability_zones']
|
||||
launch_config_name = module.params.get('launch_config_name')
|
||||
min_size = module.params['min_size']
|
||||
max_size = module.params['max_size']
|
||||
desired_capacity = module.params.get('desired_capacity')
|
||||
vpc_zone_identifier = module.params.get('vpc_zone_identifier')
|
||||
|
||||
launch_configs = connection.get_all_launch_configurations(names=[launch_config_name])
|
||||
|
||||
as_groups = connection.get_all_groups(names=[group_name])
|
||||
|
||||
if not as_groups:
|
||||
ag = AutoScalingGroup(
|
||||
group_name=group_name,
|
||||
load_balancers=load_balancers,
|
||||
availability_zones=availability_zones,
|
||||
launch_config=launch_configs[0],
|
||||
min_size=min_size,
|
||||
max_size=max_size,
|
||||
desired_capacity=desired_capacity,
|
||||
vpc_zone_identifier=vpc_zone_identifier,
|
||||
connection=connection)
|
||||
|
||||
try:
|
||||
connection.create_auto_scaling_group(ag)
|
||||
module.exit_json(changed=True)
|
||||
except BotoServerError, e:
|
||||
module.fail_json(msg=str(e))
|
||||
else:
|
||||
as_group = as_groups[0]
|
||||
changed = False
|
||||
for attr in ('launch_config_name', 'max_size', 'min_size', 'desired_capacity',
|
||||
'vpc_zone_identifier', 'availability_zones'):
|
||||
if getattr(as_group, attr) != module.params.get(attr):
|
||||
changed = True
|
||||
setattr(as_group, attr, module.params.get(attr))
|
||||
# handle loadbalancers separately because None != []
|
||||
load_balancers = module.params.get('load_balancers') or []
|
||||
if as_group.load_balancers != load_balancers:
|
||||
changed = True
|
||||
as_group.load_balancers = module.params.get('load_balancers')
|
||||
|
||||
try:
|
||||
if changed:
|
||||
as_group.update()
|
||||
module.exit_json(changed=changed)
|
||||
except BotoServerError, e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def delete_autoscaling_group(connection, module):
|
||||
group_name = module.params.get('name')
|
||||
groups = connection.get_all_groups(names=[group_name])
|
||||
if groups:
|
||||
group = groups[0]
|
||||
group.shutdown_instances()
|
||||
|
||||
instances = True
|
||||
while instances:
|
||||
connection.get_all_groups()
|
||||
for group in groups:
|
||||
if group.name == group_name:
|
||||
if not group.instances:
|
||||
instances = False
|
||||
time.sleep(10)
|
||||
|
||||
group.delete()
|
||||
module.exit_json(changed=True)
|
||||
else:
|
||||
module.exit_json(changed=False)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
name=dict(required=True, type='str'),
|
||||
load_balancers=dict(type='list'),
|
||||
availability_zones=dict(type='list'),
|
||||
launch_config_name=dict(type='str'),
|
||||
min_size=dict(type='int'),
|
||||
max_size=dict(type='int'),
|
||||
desired_capacity=dict(type='int'),
|
||||
vpc_zone_identifier=dict(type='str'),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
)
|
||||
)
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
state = module.params.get('state')
|
||||
|
||||
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
|
||||
try:
|
||||
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
|
||||
except boto.exception.NoAuthHandlerFound, e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
if state == 'present':
|
||||
create_autoscaling_group(connection, module)
|
||||
elif state == 'absent':
|
||||
delete_autoscaling_group(connection, module)
|
||||
|
||||
main()
|
|
@ -23,24 +23,6 @@ options:
|
|||
required: false
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
ec2_url:
|
||||
description:
|
||||
- URL to use to connect to EC2-compatible cloud (by default the module will use EC2 endpoints)
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ EC2_URL ]
|
||||
ec2_access_key:
|
||||
description:
|
||||
- EC2 access key. If not specified then the EC2_ACCESS_KEY environment variable is used.
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ EC2_ACCESS_KEY ]
|
||||
ec2_secret_key:
|
||||
description:
|
||||
- EC2 secret key. If not specified then the EC2_SECRET_KEY environment variable is used.
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ EC2_SECRET_KEY ]
|
||||
region:
|
||||
description:
|
||||
- the EC2 region to use
|
||||
|
@ -53,16 +35,14 @@ options:
|
|||
required: false
|
||||
default: false
|
||||
version_added: "1.4"
|
||||
validate_certs:
|
||||
reuse_existing_ip_allowed:
|
||||
description:
|
||||
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
|
||||
- Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one.
|
||||
required: false
|
||||
default: "yes"
|
||||
choices: ["yes", "no"]
|
||||
aliases: []
|
||||
version_added: "1.5"
|
||||
default: false
|
||||
version_added: "1.6"
|
||||
|
||||
requirements: [ "boto" ]
|
||||
extends_documentation_fragment: aws
|
||||
author: Lorin Hochstein <lorin@nimbisservices.com>
|
||||
notes:
|
||||
- This module will return C(public_ip) on success, which will contain the
|
||||
|
@ -175,13 +155,27 @@ def ip_is_associated_with_instance(ec2, public_ip, instance_id, module):
|
|||
return False
|
||||
|
||||
|
||||
def allocate_address(ec2, domain, module):
|
||||
""" Allocate a new elastic IP address and return it """
|
||||
def allocate_address(ec2, domain, module, reuse_existing_ip_allowed):
|
||||
""" Allocate a new elastic IP address (when needed) and return it """
|
||||
# If we're in check mode, nothing else to do
|
||||
if module.check_mode:
|
||||
module.exit_json(change=True)
|
||||
|
||||
address = ec2.allocate_address(domain=domain)
|
||||
if reuse_existing_ip_allowed:
|
||||
if domain:
|
||||
domain_filter = { 'domain' : domain }
|
||||
else:
|
||||
domain_filter = { 'domain' : 'standard' }
|
||||
all_addresses = ec2.get_all_addresses(filters=domain_filter)
|
||||
|
||||
unassociated_addresses = filter(lambda a: a.instance_id is None, all_addresses)
|
||||
if unassociated_addresses:
|
||||
address = unassociated_addresses[0];
|
||||
else:
|
||||
address = ec2.allocate_address(domain=domain)
|
||||
else:
|
||||
address = ec2.allocate_address(domain=domain)
|
||||
|
||||
return address
|
||||
|
||||
|
||||
|
@ -224,7 +218,8 @@ def main():
|
|||
public_ip = dict(required=False, aliases= ['ip']),
|
||||
state = dict(required=False, default='present',
|
||||
choices=['present', 'absent']),
|
||||
in_vpc = dict(required=False, choices=BOOLEANS, default=False),
|
||||
in_vpc = dict(required=False, type='bool', default=False),
|
||||
reuse_existing_ip_allowed = dict(required=False, type='bool', default=False),
|
||||
)
|
||||
)
|
||||
|
||||
|
@ -243,18 +238,19 @@ def main():
|
|||
state = module.params.get('state')
|
||||
in_vpc = module.params.get('in_vpc')
|
||||
domain = "vpc" if in_vpc else None
|
||||
reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed');
|
||||
|
||||
if state == 'present':
|
||||
if public_ip is None:
|
||||
if instance_id is None:
|
||||
address = allocate_address(ec2, domain, module)
|
||||
address = allocate_address(ec2, domain, module, reuse_existing_ip_allowed)
|
||||
module.exit_json(changed=True, public_ip=address.public_ip)
|
||||
else:
|
||||
# Determine if the instance is inside a VPC or not
|
||||
instance = find_instance(ec2, instance_id, module)
|
||||
if instance.vpc_id != None:
|
||||
domain = "vpc"
|
||||
address = allocate_address(ec2, domain, module)
|
||||
address = allocate_address(ec2, domain, module, reuse_existing_ip_allowed)
|
||||
else:
|
||||
address = find_address(ec2, public_ip, module)
|
||||
associate_ip_and_instance(ec2, address, instance_id, module)
|
||||
|
|
|
@ -25,7 +25,6 @@ description:
|
|||
if state=absent is passed as an argument.
|
||||
- Will be marked changed when called only if there are ELBs found to operate on.
|
||||
version_added: "1.2"
|
||||
requirements: [ "boto" ]
|
||||
author: John Jarvis
|
||||
options:
|
||||
state:
|
||||
|
@ -33,29 +32,15 @@ options:
|
|||
- register or deregister the instance
|
||||
required: true
|
||||
choices: ['present', 'absent']
|
||||
|
||||
instance_id:
|
||||
description:
|
||||
- EC2 Instance ID
|
||||
required: true
|
||||
|
||||
ec2_elbs:
|
||||
description:
|
||||
- List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
|
||||
required: false
|
||||
default: None
|
||||
aws_secret_key:
|
||||
description:
|
||||
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
|
||||
required: false
|
||||
default: None
|
||||
aliases: ['ec2_secret_key', 'secret_key' ]
|
||||
aws_access_key:
|
||||
description:
|
||||
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
|
||||
required: false
|
||||
default: None
|
||||
aliases: ['ec2_access_key', 'access_key' ]
|
||||
region:
|
||||
description:
|
||||
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
|
||||
|
@ -82,7 +67,13 @@ options:
|
|||
choices: ["yes", "no"]
|
||||
aliases: []
|
||||
version_added: "1.5"
|
||||
|
||||
wait_timeout:
|
||||
description:
|
||||
- Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
|
||||
required: false
|
||||
default: 0
|
||||
version_added: "1.6"
|
||||
extends_documentation_fragment: aws
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
|
@ -124,16 +115,15 @@ class ElbManager:
|
|||
"""Handles EC2 instance ELB registration and de-registration"""
|
||||
|
||||
def __init__(self, module, instance_id=None, ec2_elbs=None,
|
||||
aws_access_key=None, aws_secret_key=None, region=None):
|
||||
self.aws_access_key = aws_access_key
|
||||
self.aws_secret_key = aws_secret_key
|
||||
region=None, **aws_connect_params):
|
||||
self.module = module
|
||||
self.instance_id = instance_id
|
||||
self.region = region
|
||||
self.aws_connect_params = aws_connect_params
|
||||
self.lbs = self._get_instance_lbs(ec2_elbs)
|
||||
self.changed = False
|
||||
|
||||
def deregister(self, wait):
|
||||
def deregister(self, wait, timeout):
|
||||
"""De-register the instance from all ELBs and wait for the ELB
|
||||
to report it out-of-service"""
|
||||
|
||||
|
@ -146,18 +136,17 @@ class ElbManager:
|
|||
return
|
||||
|
||||
if wait:
|
||||
self._await_elb_instance_state(lb, 'OutOfService', initial_state)
|
||||
self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)
|
||||
else:
|
||||
# We cannot assume no change was made if we don't wait
|
||||
# to find out
|
||||
self.changed = True
|
||||
|
||||
def register(self, wait, enable_availability_zone):
|
||||
def register(self, wait, enable_availability_zone, timeout):
|
||||
"""Register the instance for all ELBs and wait for the ELB
|
||||
to report the instance in-service"""
|
||||
for lb in self.lbs:
|
||||
if wait:
|
||||
initial_state = self._get_instance_health(lb)
|
||||
initial_state = self._get_instance_health(lb)
|
||||
|
||||
if enable_availability_zone:
|
||||
self._enable_availailability_zone(lb)
|
||||
|
@ -165,7 +154,7 @@ class ElbManager:
|
|||
lb.register_instances([self.instance_id])
|
||||
|
||||
if wait:
|
||||
self._await_elb_instance_state(lb, 'InService', initial_state)
|
||||
self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
|
||||
else:
|
||||
# We cannot assume no change was made if we don't wait
|
||||
# to find out
|
||||
|
@ -195,10 +184,12 @@ class ElbManager:
|
|||
# lb.availability_zones
|
||||
return instance.placement in lb.availability_zones
|
||||
|
||||
def _await_elb_instance_state(self, lb, awaited_state, initial_state):
|
||||
def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
|
||||
"""Wait for an ELB to change state
|
||||
lb: load balancer
|
||||
awaited_state : state to poll for (string)"""
|
||||
|
||||
wait_timeout = time.time() + timeout
|
||||
while True:
|
||||
instance_state = self._get_instance_health(lb)
|
||||
|
||||
|
@ -217,7 +208,8 @@ class ElbManager:
|
|||
# If it's pending, we'll skip further checks andd continue waiting
|
||||
pass
|
||||
elif (awaited_state == 'InService'
|
||||
and instance_state.reason_code == "Instance"):
|
||||
and instance_state.reason_code == "Instance"
|
||||
and time.time() >= wait_timeout):
|
||||
# If the reason_code for the instance being out of service is
|
||||
# "Instance" this indicates a failure state, e.g. the instance
|
||||
# has failed a health check or the ELB does not have the
|
||||
|
@ -262,9 +254,8 @@ class ElbManager:
|
|||
are attached to self.instance_id"""
|
||||
|
||||
try:
|
||||
endpoint="elasticloadbalancing.%s.amazonaws.com" % self.region
|
||||
connect_region = RegionInfo(name=self.region, endpoint=endpoint)
|
||||
elb = boto.ec2.elb.ELBConnection(self.aws_access_key, self.aws_secret_key, region=connect_region)
|
||||
elb = connect_to_aws(boto.ec2.elb, self.region,
|
||||
**self.aws_connect_params)
|
||||
except boto.exception.NoAuthHandlerFound, e:
|
||||
self.module.fail_json(msg=str(e))
|
||||
|
||||
|
@ -283,23 +274,22 @@ class ElbManager:
|
|||
def _get_instance(self):
|
||||
"""Returns a boto.ec2.InstanceObject for self.instance_id"""
|
||||
try:
|
||||
endpoint = "ec2.%s.amazonaws.com" % self.region
|
||||
connect_region = RegionInfo(name=self.region, endpoint=endpoint)
|
||||
ec2_conn = boto.ec2.EC2Connection(self.aws_access_key, self.aws_secret_key, region=connect_region)
|
||||
ec2 = connect_to_aws(boto.ec2, self.region,
|
||||
**self.aws_connect_params)
|
||||
except boto.exception.NoAuthHandlerFound, e:
|
||||
self.module.fail_json(msg=str(e))
|
||||
return ec2_conn.get_only_instances(instance_ids=[self.instance_id])[0]
|
||||
return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
state={'required': True,
|
||||
'choices': ['present', 'absent']},
|
||||
state={'required': True},
|
||||
instance_id={'required': True},
|
||||
ec2_elbs={'default': None, 'required': False, 'type':'list'},
|
||||
enable_availability_zone={'default': True, 'required': False, 'choices': BOOLEANS, 'type': 'bool'},
|
||||
wait={'required': False, 'choices': BOOLEANS, 'default': True, 'type': 'bool'}
|
||||
enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
|
||||
wait={'required': False, 'default': True, 'type': 'bool'},
|
||||
wait_timeout={'requred': False, 'default': 0, 'type': 'int'}
|
||||
)
|
||||
)
|
||||
|
||||
|
@ -307,21 +297,22 @@ def main():
|
|||
argument_spec=argument_spec,
|
||||
)
|
||||
|
||||
# def get_ec2_creds(module):
|
||||
# return ec2_url, ec2_access_key, ec2_secret_key, region
|
||||
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
|
||||
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
|
||||
|
||||
if not region:
|
||||
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
|
||||
|
||||
ec2_elbs = module.params['ec2_elbs']
|
||||
region = module.params['region']
|
||||
wait = module.params['wait']
|
||||
enable_availability_zone = module.params['enable_availability_zone']
|
||||
timeout = module.params['wait_timeout']
|
||||
|
||||
if module.params['state'] == 'present' and 'ec2_elbs' not in module.params:
|
||||
module.fail_json(msg="ELBs are required for registration")
|
||||
|
||||
instance_id = module.params['instance_id']
|
||||
elb_man = ElbManager(module, instance_id, ec2_elbs, aws_access_key,
|
||||
aws_secret_key, region=region)
|
||||
elb_man = ElbManager(module, instance_id, ec2_elbs,
|
||||
region=region, **aws_connect_params)
|
||||
|
||||
if ec2_elbs is not None:
|
||||
for elb in ec2_elbs:
|
||||
|
@ -330,9 +321,9 @@ def main():
|
|||
module.fail_json(msg=msg)
|
||||
|
||||
if module.params['state'] == 'present':
|
||||
elb_man.register(wait, enable_availability_zone)
|
||||
elb_man.register(wait, enable_availability_zone, timeout)
|
||||
elif module.params['state'] == 'absent':
|
||||
elb_man.deregister(wait)
|
||||
elb_man.deregister(wait, timeout)
|
||||
|
||||
ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
|
||||
ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
|
||||
|
|
|
@ -22,7 +22,6 @@ short_description: Creates or destroys Amazon ELB.
|
|||
- Returns information about the load balancer.
|
||||
- Will be marked changed when called only if state is changed.
|
||||
version_added: "1.5"
|
||||
requirements: [ "boto" ]
|
||||
author: Jim Dalton
|
||||
options:
|
||||
state:
|
||||
|
@ -51,37 +50,23 @@ options:
|
|||
- Purge existing availability zones on ELB that are not found in zones
|
||||
required: false
|
||||
default: false
|
||||
security_group_ids:
|
||||
description:
|
||||
- A list of security groups to apply to the elb
|
||||
require: false
|
||||
default: None
|
||||
version_added: "1.6"
|
||||
health_check:
|
||||
description:
|
||||
- An associative array of health check configuration settigs (see example)
|
||||
require: false
|
||||
default: None
|
||||
aws_secret_key:
|
||||
description:
|
||||
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
|
||||
required: false
|
||||
default: None
|
||||
aliases: ['ec2_secret_key', 'secret_key']
|
||||
aws_access_key:
|
||||
description:
|
||||
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
|
||||
required: false
|
||||
default: None
|
||||
aliases: ['ec2_access_key', 'access_key']
|
||||
region:
|
||||
description:
|
||||
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
|
||||
required: false
|
||||
aliases: ['aws_region', 'ec2_region']
|
||||
validate_certs:
|
||||
description:
|
||||
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
|
||||
required: false
|
||||
default: "yes"
|
||||
choices: ["yes", "no"]
|
||||
aliases: []
|
||||
version_added: "1.5"
|
||||
|
||||
extends_documentation_fragment: aws
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
|
@ -183,18 +168,18 @@ class ElbManager(object):
|
|||
"""Handles ELB creation and destruction"""
|
||||
|
||||
def __init__(self, module, name, listeners=None, purge_listeners=None,
|
||||
zones=None, purge_zones=None, health_check=None,
|
||||
aws_access_key=None, aws_secret_key=None, region=None):
|
||||
zones=None, purge_zones=None, security_group_ids=None, health_check=None,
|
||||
region=None, **aws_connect_params):
|
||||
self.module = module
|
||||
self.name = name
|
||||
self.listeners = listeners
|
||||
self.purge_listeners = purge_listeners
|
||||
self.zones = zones
|
||||
self.purge_zones = purge_zones
|
||||
self.security_group_ids = security_group_ids
|
||||
self.health_check = health_check
|
||||
|
||||
self.aws_access_key = aws_access_key
|
||||
self.aws_secret_key = aws_secret_key
|
||||
self.aws_connect_params = aws_connect_params
|
||||
self.region = region
|
||||
|
||||
self.changed = False
|
||||
|
@ -209,6 +194,7 @@ class ElbManager(object):
|
|||
self._create_elb()
|
||||
else:
|
||||
self._set_zones()
|
||||
self._set_security_groups()
|
||||
self._set_elb_listeners()
|
||||
self._set_health_check()
|
||||
|
||||
|
@ -228,6 +214,7 @@ class ElbManager(object):
|
|||
'name': self.elb.name,
|
||||
'dns_name': self.elb.dns_name,
|
||||
'zones': self.elb.availability_zones,
|
||||
'security_group_ids': self.elb.security_groups,
|
||||
'status': self.status
|
||||
}
|
||||
|
||||
|
@ -262,11 +249,8 @@ class ElbManager(object):
|
|||
|
||||
def _get_elb_connection(self):
|
||||
try:
|
||||
endpoint = "elasticloadbalancing.%s.amazonaws.com" % self.region
|
||||
connect_region = RegionInfo(name=self.region, endpoint=endpoint)
|
||||
return boto.ec2.elb.ELBConnection(self.aws_access_key,
|
||||
self.aws_secret_key,
|
||||
region=connect_region)
|
||||
return connect_to_aws(boto.ec2.elb, self.region,
|
||||
**self.aws_connect_params)
|
||||
except boto.exception.NoAuthHandlerFound, e:
|
||||
self.module.fail_json(msg=str(e))
|
||||
|
||||
|
@ -281,6 +265,7 @@ class ElbManager(object):
|
|||
listeners = [self._listener_as_tuple(l) for l in self.listeners]
|
||||
self.elb = self.elb_conn.create_load_balancer(name=self.name,
|
||||
zones=self.zones,
|
||||
security_groups=self.security_group_ids,
|
||||
complex_listeners=listeners)
|
||||
if self.elb:
|
||||
self.changed = True
|
||||
|
@ -405,6 +390,11 @@ class ElbManager(object):
|
|||
if zones_to_disable:
|
||||
self._disable_zones(zones_to_disable)
|
||||
|
||||
def _set_security_groups(self):
|
||||
if self.security_group_ids != None and set(self.elb.security_groups) != set(self.security_group_ids):
|
||||
self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
|
||||
self.Changed = True
|
||||
|
||||
def _set_health_check(self):
|
||||
"""Set health check values on ELB as needed"""
|
||||
if self.health_check:
|
||||
|
@ -452,11 +442,10 @@ def main():
|
|||
state={'required': True, 'choices': ['present', 'absent']},
|
||||
name={'required': True},
|
||||
listeners={'default': None, 'required': False, 'type': 'list'},
|
||||
purge_listeners={'default': True, 'required': False,
|
||||
'choices': BOOLEANS, 'type': 'bool'},
|
||||
purge_listeners={'default': True, 'required': False, 'type': 'bool'},
|
||||
zones={'default': None, 'required': False, 'type': 'list'},
|
||||
purge_zones={'default': False, 'required': False,
|
||||
'choices': BOOLEANS, 'type': 'bool'},
|
||||
purge_zones={'default': False, 'required': False, 'type': 'bool'},
|
||||
security_group_ids={'default': None, 'required': False, 'type': 'list'},
|
||||
health_check={'default': None, 'required': False, 'type': 'dict'},
|
||||
)
|
||||
)
|
||||
|
@ -465,9 +454,9 @@ def main():
|
|||
argument_spec=argument_spec,
|
||||
)
|
||||
|
||||
# def get_ec2_creds(module):
|
||||
# return ec2_url, ec2_access_key, ec2_secret_key, region
|
||||
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
|
||||
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
|
||||
if not region:
|
||||
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
|
||||
|
||||
name = module.params['name']
|
||||
state = module.params['state']
|
||||
|
@ -475,6 +464,7 @@ def main():
|
|||
purge_listeners = module.params['purge_listeners']
|
||||
zones = module.params['zones']
|
||||
purge_zones = module.params['purge_zones']
|
||||
security_group_ids = module.params['security_group_ids']
|
||||
health_check = module.params['health_check']
|
||||
|
||||
if state == 'present' and not listeners:
|
||||
|
@ -484,8 +474,8 @@ def main():
|
|||
module.fail_json(msg="At least one availability zone is required for ELB creation")
|
||||
|
||||
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
|
||||
purge_zones, health_check, aws_access_key,
|
||||
aws_secret_key, region=region)
|
||||
purge_zones, security_group_ids, health_check,
|
||||
region=region, **aws_connect_params)
|
||||
|
||||
if state == 'present':
|
||||
elb_man.ensure_ok()
|
||||
|
|
|
@ -21,7 +21,15 @@ DOCUMENTATION = '''
|
|||
module: ec2_facts
|
||||
short_description: Gathers facts about remote hosts within ec2 (aws)
|
||||
version_added: "1.0"
|
||||
options: {}
|
||||
options:
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated. This should only be used
|
||||
on personally controlled sites using self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
version_added: 1.5.1
|
||||
description:
|
||||
- This module fetches data from the metadata servers in ec2 (aws).
|
||||
Eucalyptus cloud provides a similar service and this module should
|
||||
|
@ -41,7 +49,6 @@ EXAMPLES = '''
|
|||
when: ansible_ec2_instance_type == "t1.micro"
|
||||
'''
|
||||
|
||||
import urllib2
|
||||
import socket
|
||||
import re
|
||||
|
||||
|
@ -62,7 +69,8 @@ class Ec2Metadata(object):
|
|||
'us-west-1',
|
||||
'us-west-2')
|
||||
|
||||
def __init__(self, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None):
|
||||
def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None):
|
||||
self.module = module
|
||||
self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri
|
||||
self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri
|
||||
self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri
|
||||
|
@ -70,12 +78,12 @@ class Ec2Metadata(object):
|
|||
self._prefix = 'ansible_ec2_%s'
|
||||
|
||||
def _fetch(self, url):
|
||||
try:
|
||||
return urllib2.urlopen(url).read()
|
||||
except urllib2.HTTPError:
|
||||
return
|
||||
except urllib2.URLError:
|
||||
return
|
||||
(response, info) = fetch_url(self.module, url, force=True)
|
||||
if response:
|
||||
data = response.read()
|
||||
else:
|
||||
data = None
|
||||
return data
|
||||
|
||||
def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']):
|
||||
new_fields = {}
|
||||
|
@ -150,17 +158,20 @@ class Ec2Metadata(object):
|
|||
return data
|
||||
|
||||
def main():
|
||||
|
||||
ec2_facts = Ec2Metadata().run()
|
||||
ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts)
|
||||
argument_spec = url_argument_spec()
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(),
|
||||
argument_spec = argument_spec,
|
||||
supports_check_mode = True,
|
||||
)
|
||||
|
||||
ec2_facts = Ec2Metadata(module).run()
|
||||
ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts)
|
||||
|
||||
module.exit_json(**ec2_facts_result)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
|
||||
main()
|
||||
|
|
184
cloud/ec2_group
184
cloud/ec2_group
|
@ -24,32 +24,19 @@ options:
|
|||
required: false
|
||||
rules:
|
||||
description:
|
||||
- List of firewall rules to enforce in this group (see example).
|
||||
required: true
|
||||
- List of firewall inbound rules to enforce in this group (see example).
|
||||
required: false
|
||||
rules_egress:
|
||||
description:
|
||||
- List of firewall outbound rules to enforce in this group (see example).
|
||||
required: false
|
||||
version_added: "1.6"
|
||||
region:
|
||||
description:
|
||||
- the EC2 region to use
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
ec2_url:
|
||||
description:
|
||||
- Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints)
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
ec2_secret_key:
|
||||
description:
|
||||
- EC2 secret key
|
||||
required: false
|
||||
default: null
|
||||
aliases: ['aws_secret_key']
|
||||
ec2_access_key:
|
||||
description:
|
||||
- EC2 access key
|
||||
required: false
|
||||
default: null
|
||||
aliases: ['aws_access_key']
|
||||
state:
|
||||
version_added: "1.4"
|
||||
description:
|
||||
|
@ -57,16 +44,13 @@ options:
|
|||
required: false
|
||||
default: 'present'
|
||||
aliases: []
|
||||
validate_certs:
|
||||
description:
|
||||
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
|
||||
required: false
|
||||
default: "yes"
|
||||
choices: ["yes", "no"]
|
||||
aliases: []
|
||||
version_added: "1.5"
|
||||
|
||||
requirements: [ "boto" ]
|
||||
extends_documentation_fragment: aws
|
||||
|
||||
notes:
|
||||
- If a rule declares a group_name and that group doesn't exist, it will be
|
||||
automatically created. In that case, group_desc should be provided as well.
|
||||
The module will refuse to create a depended-on group without a description.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -99,6 +83,13 @@ EXAMPLES = '''
|
|||
- proto: all
|
||||
# the containing group name may be specified here
|
||||
group_name: example
|
||||
rules_egress:
|
||||
- proto: tcp
|
||||
from_port: 80
|
||||
to_port: 80
|
||||
group_name: example-other
|
||||
# description to use if example-other needs to be created
|
||||
group_desc: other example EC2 group
|
||||
'''
|
||||
|
||||
try:
|
||||
|
@ -114,6 +105,55 @@ def addRulesToLookup(rules, prefix, dict):
|
|||
dict["%s-%s-%s-%s-%s-%s" % (prefix, rule.ip_protocol, rule.from_port, rule.to_port,
|
||||
grant.group_id, grant.cidr_ip)] = rule
|
||||
|
||||
|
||||
def get_target_from_rule(rule, name, groups):
|
||||
"""
|
||||
Returns tuple of (group_id, ip) after validating rule params.
|
||||
|
||||
rule: Dict describing a rule.
|
||||
name: Name of the security group being managed.
|
||||
groups: Dict of all available security groups.
|
||||
|
||||
AWS accepts an ip range or a security group as target of a rule. This
|
||||
function validate the rule specification and return either a non-None
|
||||
group_id or a non-None ip range.
|
||||
"""
|
||||
|
||||
group_id = None
|
||||
group_name = None
|
||||
ip = None
|
||||
target_group_created = False
|
||||
if 'group_id' in rule and 'cidr_ip' in rule:
|
||||
module.fail_json(msg="Specify group_id OR cidr_ip, not both")
|
||||
elif 'group_name' in rule and 'cidr_ip' in rule:
|
||||
module.fail_json(msg="Specify group_name OR cidr_ip, not both")
|
||||
elif 'group_id' in rule and 'group_name' in rule:
|
||||
module.fail_json(msg="Specify group_id OR group_name, not both")
|
||||
elif 'group_id' in rule:
|
||||
group_id = rule['group_id']
|
||||
elif 'group_name' in rule:
|
||||
group_name = rule['group_name']
|
||||
if group_name in groups:
|
||||
group_id = groups[group_name].id
|
||||
elif group_name == name:
|
||||
group_id = group.id
|
||||
groups[group_id] = group
|
||||
groups[group_name] = group
|
||||
else:
|
||||
if not rule.get('group_desc', '').strip():
|
||||
module.fail_json(msg="group %s will be automatically created by rule %s and no description was provided" % (group_name, rule))
|
||||
if not module.check_mode:
|
||||
auto_group = ec2.create_security_group(group_name, rule['group_desc'], vpc_id=vpc_id)
|
||||
group_id = auto_group.id
|
||||
groups[group_id] = auto_group
|
||||
groups[group_name] = auto_group
|
||||
target_group_created = True
|
||||
elif 'cidr_ip' in rule:
|
||||
ip = rule['cidr_ip']
|
||||
|
||||
return group_id, ip, target_group_created
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
|
@ -121,6 +161,7 @@ def main():
|
|||
description=dict(required=True),
|
||||
vpc_id=dict(),
|
||||
rules=dict(),
|
||||
rules_egress=dict(),
|
||||
state = dict(default='present', choices=['present', 'absent']),
|
||||
)
|
||||
)
|
||||
|
@ -133,6 +174,7 @@ def main():
|
|||
description = module.params['description']
|
||||
vpc_id = module.params['vpc_id']
|
||||
rules = module.params['rules']
|
||||
rules_egress = module.params['rules_egress']
|
||||
state = module.params.get('state')
|
||||
|
||||
changed = False
|
||||
|
@ -183,39 +225,29 @@ def main():
|
|||
'''no match found, create it'''
|
||||
if not module.check_mode:
|
||||
group = ec2.create_security_group(name, description, vpc_id=vpc_id)
|
||||
|
||||
# When a group is created, an egress_rule ALLOW ALL
|
||||
# to 0.0.0.0/0 is added automatically but it's not
|
||||
# reflected in the object returned by the AWS API
|
||||
# call. We re-read the group for getting an updated object
|
||||
group = ec2.get_all_security_groups(group_ids=(group.id,))[0]
|
||||
changed = True
|
||||
else:
|
||||
module.fail_json(msg="Unsupported state requested: %s" % state)
|
||||
|
||||
# create a lookup for all existing rules on the group
|
||||
if group:
|
||||
|
||||
# Manage ingress rules
|
||||
groupRules = {}
|
||||
addRulesToLookup(group.rules, 'in', groupRules)
|
||||
|
||||
# Now, go through all provided rules and ensure they are there.
|
||||
if rules:
|
||||
for rule in rules:
|
||||
group_id = None
|
||||
group_name = None
|
||||
ip = None
|
||||
if 'group_id' in rule and 'cidr_ip' in rule:
|
||||
module.fail_json(msg="Specify group_id OR cidr_ip, not both")
|
||||
elif 'group_name' in rule and 'cidr_ip' in rule:
|
||||
module.fail_json(msg="Specify group_name OR cidr_ip, not both")
|
||||
elif 'group_id' in rule and 'group_name' in rule:
|
||||
module.fail_json(msg="Specify group_id OR group_name, not both")
|
||||
elif 'group_id' in rule:
|
||||
group_id = rule['group_id']
|
||||
elif 'group_name' in rule:
|
||||
group_name = rule['group_name']
|
||||
if group_name in groups:
|
||||
group_id = groups[group_name].id
|
||||
elif group_name == name:
|
||||
group_id = group.id
|
||||
groups[group_id] = group
|
||||
groups[group_name] = group
|
||||
elif 'cidr_ip' in rule:
|
||||
ip = rule['cidr_ip']
|
||||
group_id, ip, target_group_created = get_target_from_rule(rule, name, groups)
|
||||
if target_group_created:
|
||||
changed = True
|
||||
|
||||
if rule['proto'] == 'all':
|
||||
rule['proto'] = -1
|
||||
|
@ -246,6 +278,58 @@ def main():
|
|||
group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup)
|
||||
changed = True
|
||||
|
||||
# Manage egress rules
|
||||
groupRules = {}
|
||||
addRulesToLookup(group.rules_egress, 'out', groupRules)
|
||||
|
||||
# Now, go through all provided rules and ensure they are there.
|
||||
if rules_egress:
|
||||
for rule in rules_egress:
|
||||
group_id, ip, target_group_created = get_target_from_rule(rule, name, groups)
|
||||
if target_group_created:
|
||||
changed = True
|
||||
|
||||
if rule['proto'] == 'all':
|
||||
rule['proto'] = -1
|
||||
rule['from_port'] = None
|
||||
rule['to_port'] = None
|
||||
|
||||
# If rule already exists, don't later delete it
|
||||
ruleId = "%s-%s-%s-%s-%s-%s" % ('out', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip)
|
||||
if ruleId in groupRules:
|
||||
del groupRules[ruleId]
|
||||
# Otherwise, add new rule
|
||||
else:
|
||||
grantGroup = None
|
||||
if group_id:
|
||||
grantGroup = groups[group_id].id
|
||||
|
||||
if not module.check_mode:
|
||||
ec2.authorize_security_group_egress(
|
||||
group_id=group.id,
|
||||
ip_protocol=rule['proto'],
|
||||
from_port=rule['from_port'],
|
||||
to_port=rule['to_port'],
|
||||
src_group_id=grantGroup,
|
||||
cidr_ip=ip)
|
||||
changed = True
|
||||
|
||||
# Finally, remove anything left in the groupRules -- these will be defunct rules
|
||||
for rule in groupRules.itervalues():
|
||||
for grant in rule.grants:
|
||||
grantGroup = None
|
||||
if grant.group_id:
|
||||
grantGroup = groups[grant.group_id].id
|
||||
if not module.check_mode:
|
||||
ec2.revoke_security_group_egress(
|
||||
group_id=group.id,
|
||||
ip_protocol=rule.ip_protocol,
|
||||
from_port=rule.from_port,
|
||||
to_port=rule.to_port,
|
||||
src_group_id=grantGroup,
|
||||
cidr_ip=grant.cidr_ip)
|
||||
changed = True
|
||||
|
||||
if group:
|
||||
module.exit_json(changed=changed, group_id=group.id)
|
||||
else:
|
||||
|
|
109
cloud/ec2_key
109
cloud/ec2_key
|
@ -24,40 +24,28 @@ options:
|
|||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
ec2_url:
|
||||
description:
|
||||
- Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints)
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
ec2_secret_key:
|
||||
description:
|
||||
- EC2 secret key
|
||||
required: false
|
||||
default: null
|
||||
aliases: ['aws_secret_key', 'secret_key']
|
||||
ec2_access_key:
|
||||
description:
|
||||
- EC2 access key
|
||||
required: false
|
||||
default: null
|
||||
aliases: ['aws_access_key', 'access_key']
|
||||
state:
|
||||
description:
|
||||
- create or delete keypair
|
||||
required: false
|
||||
default: 'present'
|
||||
aliases: []
|
||||
validate_certs:
|
||||
wait:
|
||||
description:
|
||||
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
|
||||
- Wait for the specified action to complete before returning.
|
||||
required: false
|
||||
default: "yes"
|
||||
choices: ["yes", "no"]
|
||||
default: false
|
||||
aliases: []
|
||||
version_added: "1.5"
|
||||
version_added: "1.6"
|
||||
wait_timeout:
|
||||
description:
|
||||
- How long before wait gives up, in seconds
|
||||
required: false
|
||||
default: 300
|
||||
aliases: []
|
||||
version_added: "1.6"
|
||||
|
||||
requirements: [ "boto" ]
|
||||
extends_documentation_fragment: aws
|
||||
author: Vincent Viallet
|
||||
'''
|
||||
|
||||
|
@ -104,12 +92,18 @@ except ImportError:
|
|||
print "failed=True msg='boto required for this module'"
|
||||
sys.exit(1)
|
||||
|
||||
import random
|
||||
import string
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
name=dict(required=True),
|
||||
key_material=dict(required=False),
|
||||
state = dict(default='present', choices=['present', 'absent']),
|
||||
wait = dict(type='bool', default=False),
|
||||
wait_timeout = dict(default=300),
|
||||
)
|
||||
)
|
||||
module = AnsibleModule(
|
||||
|
@ -120,6 +114,8 @@ def main():
|
|||
name = module.params['name']
|
||||
state = module.params.get('state')
|
||||
key_material = module.params.get('key_material')
|
||||
wait = module.params.get('wait')
|
||||
wait_timeout = int(module.params.get('wait_timeout'))
|
||||
|
||||
changed = False
|
||||
|
||||
|
@ -134,6 +130,16 @@ def main():
|
|||
'''found a match, delete it'''
|
||||
try:
|
||||
key.delete()
|
||||
if wait:
|
||||
start = time.time()
|
||||
action_complete = False
|
||||
while (time.time() - start) < wait_timeout:
|
||||
if not ec2.get_key_pair(name):
|
||||
action_complete = True
|
||||
break
|
||||
time.sleep(1)
|
||||
if not action_complete:
|
||||
module.fail_json(msg="timed out while waiting for the key to be removed")
|
||||
except Exception, e:
|
||||
module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e))
|
||||
else:
|
||||
|
@ -145,10 +151,45 @@ def main():
|
|||
# Ensure requested key is present
|
||||
elif state == 'present':
|
||||
if key:
|
||||
'''existing key found'''
|
||||
# Should check if the fingerprint is the same - but lack of info
|
||||
# and different fingerprint provided (pub or private) depending if
|
||||
# the key has been created of imported.
|
||||
# existing key found
|
||||
if key_material:
|
||||
# EC2's fingerprints are non-trivial to generate, so push this key
|
||||
# to a temporary name and make ec2 calculate the fingerprint for us.
|
||||
#
|
||||
# http://blog.jbrowne.com/?p=23
|
||||
# https://forums.aws.amazon.com/thread.jspa?messageID=352828
|
||||
|
||||
# find an unused name
|
||||
test = 'empty'
|
||||
while test:
|
||||
randomchars = [random.choice(string.ascii_letters + string.digits) for x in range(0,10)]
|
||||
tmpkeyname = "ansible-" + ''.join(randomchars)
|
||||
test = ec2.get_key_pair(tmpkeyname)
|
||||
|
||||
# create tmp key
|
||||
tmpkey = ec2.import_key_pair(tmpkeyname, key_material)
|
||||
# get tmp key fingerprint
|
||||
tmpfingerprint = tmpkey.fingerprint
|
||||
# delete tmp key
|
||||
tmpkey.delete()
|
||||
|
||||
if key.fingerprint != tmpfingerprint:
|
||||
if not module.check_mode:
|
||||
key.delete()
|
||||
key = ec2.import_key_pair(name, key_material)
|
||||
|
||||
if wait:
|
||||
start = time.time()
|
||||
action_complete = False
|
||||
while (time.time() - start) < wait_timeout:
|
||||
if ec2.get_key_pair(name):
|
||||
action_complete = True
|
||||
break
|
||||
time.sleep(1)
|
||||
if not action_complete:
|
||||
module.fail_json(msg="timed out while waiting for the key to be re-created")
|
||||
|
||||
changed = True
|
||||
pass
|
||||
|
||||
# if the key doesn't exist, create it now
|
||||
|
@ -164,6 +205,18 @@ def main():
|
|||
retrieve the private key
|
||||
'''
|
||||
key = ec2.create_key_pair(name)
|
||||
|
||||
if wait:
|
||||
start = time.time()
|
||||
action_complete = False
|
||||
while (time.time() - start) < wait_timeout:
|
||||
if ec2.get_key_pair(name):
|
||||
action_complete = True
|
||||
break
|
||||
time.sleep(1)
|
||||
if not action_complete:
|
||||
module.fail_json(msg="timed out while waiting for the key to be created")
|
||||
|
||||
changed = True
|
||||
|
||||
if key:
|
||||
|
|
199
cloud/ec2_lc
Normal file
199
cloud/ec2_lc
Normal file
|
@ -0,0 +1,199 @@
|
|||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: ec2_lc
|
||||
short_description: Create or delete AWS Autoscaling Launch Configurations
|
||||
description:
|
||||
- Can create or delete AWS Autoscaling Launch Configurations
|
||||
- Works with the ec2_asg module to manage Autoscaling Groups
|
||||
version_added: "1.6"
|
||||
author: Gareth Rushgrove
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- register or deregister the instance
|
||||
required: true
|
||||
choices: ['present', 'absent']
|
||||
name:
|
||||
description:
|
||||
- Unique name for configuration
|
||||
required: true
|
||||
image_id:
|
||||
description:
|
||||
- The AMI unique identifier to be used for the group
|
||||
required: false
|
||||
key_name:
|
||||
description:
|
||||
- The SSH key name to be used for access to managed instances
|
||||
required: false
|
||||
security_groups:
|
||||
description:
|
||||
- A list of security groups with which to associate the instances
|
||||
required: false
|
||||
region:
|
||||
description:
|
||||
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
|
||||
required: false
|
||||
aliases: ['aws_region', 'ec2_region']
|
||||
volumes:
|
||||
description:
|
||||
- a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
user_data:
|
||||
description:
|
||||
- opaque blob of data which is made available to the ec2 instance
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
extends_documentation_fragment: aws
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
- ec2_lc:
|
||||
name: special
|
||||
image_id: ami-XXX
|
||||
key_name: default
|
||||
security_groups: 'group,group2'
|
||||
|
||||
'''
|
||||
|
||||
import sys
|
||||
import time
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
try:
|
||||
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
|
||||
import boto.ec2.autoscale
|
||||
from boto.ec2.autoscale import LaunchConfiguration
|
||||
from boto.exception import BotoServerError
|
||||
except ImportError:
|
||||
print "failed=True msg='boto required for this module'"
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def create_block_device(module, volume):
    # Validate one user-supplied volume dict and convert it into a boto
    # BlockDeviceType.  Any invalid combination aborts the module run.
    # Not aware of a way to determine this programatically
    # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
    MAX_IOPS_TO_SIZE_RATIO = 30

    has_snapshot = 'snapshot' in volume
    has_ephemeral = 'ephemeral' in volume

    # A brand-new volume (neither snapshot- nor ephemeral-backed) must carry
    # an explicit size.
    if not has_snapshot and not has_ephemeral and 'volume_size' not in volume:
        module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
    # Provisioned-IOPS (io1) snapshot volumes must state their iops figure.
    if has_snapshot and volume.get('device_type') == 'io1' and 'iops' not in volume:
        module.fail_json(msg='io1 volumes must have an iops value set')
    # The two backing choices are mutually exclusive.
    if has_ephemeral and has_snapshot:
        module.fail_json(msg='Cannot set both ephemeral and snapshot')

    return BlockDeviceType(snapshot_id=volume.get('snapshot'),
                           ephemeral_name=volume.get('ephemeral'),
                           size=volume.get('volume_size'),
                           volume_type=volume.get('device_type'),
                           delete_on_termination=volume.get('delete_on_termination', False),
                           iops=volume.get('iops'))
|
||||
|
||||
|
||||
def create_launch_config(connection, module):
|
||||
name = module.params.get('name')
|
||||
image_id = module.params.get('image_id')
|
||||
key_name = module.params.get('key_name')
|
||||
security_groups = module.params['security_groups']
|
||||
user_data = module.params.get('user_data')
|
||||
volumes = module.params['volumes']
|
||||
instance_type = module.params.get('instance_type')
|
||||
bdm = BlockDeviceMapping()
|
||||
|
||||
if volumes:
|
||||
for volume in volumes:
|
||||
if 'device_name' not in volume:
|
||||
module.fail_json(msg='Device name must be set for volume')
|
||||
# Minimum volume size is 1GB. We'll use volume size explicitly set to 0
|
||||
# to be a signal not to create this volume
|
||||
if 'volume_size' not in volume or int(volume['volume_size']) > 0:
|
||||
bdm[volume['device_name']] = create_block_device(module, volume)
|
||||
|
||||
lc = LaunchConfiguration(
|
||||
name=name,
|
||||
image_id=image_id,
|
||||
key_name=key_name,
|
||||
security_groups=security_groups,
|
||||
user_data=user_data,
|
||||
block_device_mappings=[bdm],
|
||||
instance_type=instance_type)
|
||||
|
||||
launch_configs = connection.get_all_launch_configurations(names=[name])
|
||||
changed = False
|
||||
if not launch_configs:
|
||||
try:
|
||||
connection.create_launch_configuration(lc)
|
||||
launch_configs = connection.get_all_launch_configurations(names=[name])
|
||||
changed = True
|
||||
except BotoServerError, e:
|
||||
module.fail_json(msg=str(e))
|
||||
result = launch_configs[0]
|
||||
|
||||
module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time),
|
||||
image_id=result.image_id, arn=result.launch_configuration_arn,
|
||||
security_groups=result.security_groups, instance_type=instance_type)
|
||||
|
||||
|
||||
def delete_launch_config(connection, module):
    # Remove the named launch configuration when present; a missing
    # configuration is reported as "no change" rather than an error.
    lc_name = module.params.get('name')
    matches = connection.get_all_launch_configurations(names=[lc_name])
    if not matches:
        module.exit_json(changed=False)
    else:
        matches[0].delete()
        module.exit_json(changed=True)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
name=dict(required=True, type='str'),
|
||||
image_id=dict(type='str'),
|
||||
key_name=dict(type='str'),
|
||||
security_groups=dict(type='list'),
|
||||
user_data=dict(type='str'),
|
||||
volumes=dict(type='list'),
|
||||
instance_type=dict(type='str'),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
|
||||
|
||||
try:
|
||||
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
|
||||
except boto.exception.NoAuthHandlerFound, e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
state = module.params.get('state')
|
||||
|
||||
if state == 'present':
|
||||
create_launch_config(connection, module)
|
||||
elif state == 'absent':
|
||||
delete_launch_config(connection, module)
|
||||
|
||||
main()
|
264
cloud/ec2_metric_alarm
Normal file
264
cloud/ec2_metric_alarm
Normal file
|
@ -0,0 +1,264 @@
|
|||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = """
|
||||
module: ec2_metric_alarm
|
||||
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
|
||||
description:
|
||||
- Can create or delete AWS metric alarms
|
||||
- Metrics you wish to alarm on must already exist
|
||||
version_added: "1.6"
|
||||
author: Zacharie Eakin
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- register or deregister the alarm
|
||||
required: true
|
||||
choices: ['present', 'absent']
|
||||
name:
|
||||
description:
|
||||
- Unique name for the alarm
|
||||
required: true
|
||||
metric:
|
||||
description:
|
||||
- Name of the monitored metric (e.g. CPUUtilization)
|
||||
- Metric must already exist
|
||||
required: false
|
||||
namespace:
|
||||
description:
|
||||
- Name of the appropriate namespace, which determines the category it will appear under in cloudwatch
|
||||
required: false
|
||||
options: ['AWS/AutoScaling','AWS/Billing','AWS/DynamoDB','AWS/ElastiCache','AWS/EBS','AWS/EC2','AWS/ELB','AWS/ElasticMapReduce','AWS/OpsWorks','AWS/Redshift','AWS/RDS','AWS/Route53','AWS/SNS','AWS/SQS','AWS/StorageGateway']
|
||||
statistic:
|
||||
description:
|
||||
- Operation applied to the metric
|
||||
- Works in conjunction with period and evaluation_periods to determine the comparison value
|
||||
required: false
|
||||
options: ['SampleCount','Average','Sum','Minimum','Maximum']
|
||||
comparison:
|
||||
description:
|
||||
- Determines how the threshold value is compared
|
||||
required: false
|
||||
options: ['<=','<','>','>=']
|
||||
threshold:
|
||||
description:
|
||||
- Sets the min/max bound for triggering the alarm
|
||||
required: false
|
||||
period:
|
||||
description:
|
||||
- The time (in seconds) between metric evaluations
|
||||
required: false
|
||||
evaluation_periods:
|
||||
description:
|
||||
- The number of times in which the metric is evaluated before final calculation
|
||||
required: false
|
||||
unit:
|
||||
description:
|
||||
- The threshold's unit of measurement
|
||||
required: false
|
||||
options: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None']
|
||||
description:
|
||||
description:
|
||||
- A longer description of the alarm
|
||||
required: false
|
||||
dimensions:
|
||||
description:
|
||||
- Describes to what the alarm is applied
|
||||
required: false
|
||||
alarm_actions:
|
||||
description:
|
||||
- A list of the names action(s) taken when the alarm is in the 'alarm' status
|
||||
required: false
|
||||
insufficient_data_actions:
|
||||
description:
|
||||
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
|
||||
required: false
|
||||
ok_actions:
|
||||
description:
|
||||
- A list of the names of action(s) to take when the alarm is in the 'ok' status
|
||||
required: false
|
||||
extends_documentation_fragment: aws
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: create alarm
|
||||
ec2_metric_alarm:
|
||||
state: present
|
||||
region: ap-southeast-2
|
||||
name: "cpu-low"
|
||||
metric: "CPUUtilization"
|
||||
namespace: "AWS/EC2"
|
||||
statistic: Average
|
||||
comparison: "<="
|
||||
threshold: 5.0
|
||||
period: 300
|
||||
evaluation_periods: 3
|
||||
unit: "Percent"
|
||||
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
|
||||
dimensions: {'InstanceId':'i-XXX'}
|
||||
alarm_actions: ["action1","action2"]
|
||||
|
||||
|
||||
'''
|
||||
|
||||
import sys
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
try:
|
||||
import boto.ec2.cloudwatch
|
||||
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
|
||||
from boto.exception import BotoServerError
|
||||
except ImportError:
|
||||
print "failed=True msg='boto required for this module'"
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def create_metric_alarm(connection, module):
|
||||
|
||||
name = module.params.get('name')
|
||||
metric = module.params.get('metric')
|
||||
namespace = module.params.get('namespace')
|
||||
statistic = module.params.get('statistic')
|
||||
comparison = module.params.get('comparison')
|
||||
threshold = module.params.get('threshold')
|
||||
period = module.params.get('period')
|
||||
evaluation_periods = module.params.get('evaluation_periods')
|
||||
unit = module.params.get('unit')
|
||||
description = module.params.get('description')
|
||||
dimensions = module.params.get('dimensions')
|
||||
alarm_actions = module.params.get('alarm_actions')
|
||||
insufficient_data_actions = module.params.get('insufficient_data_actions')
|
||||
ok_actions = module.params.get('ok_actions')
|
||||
|
||||
alarms = connection.describe_alarms(alarm_names=[name])
|
||||
|
||||
if not alarms:
|
||||
|
||||
alm = MetricAlarm(
|
||||
name=name,
|
||||
metric=metric,
|
||||
namespace=namespace,
|
||||
statistic=statistic,
|
||||
comparison=comparison,
|
||||
threshold=threshold,
|
||||
period=period,
|
||||
evaluation_periods=evaluation_periods,
|
||||
unit=unit,
|
||||
description=description,
|
||||
dimensions=dimensions,
|
||||
alarm_actions=alarm_actions,
|
||||
insufficient_data_actions=insufficient_data_actions,
|
||||
ok_actions=ok_actions
|
||||
)
|
||||
try:
|
||||
connection.create_alarm(alm)
|
||||
module.exit_json(changed=True)
|
||||
except BotoServerError, e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
else:
|
||||
alarm = alarms[0]
|
||||
changed = False
|
||||
|
||||
for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
|
||||
if getattr(alarm, attr) != module.params.get(attr):
|
||||
changed = True
|
||||
setattr(alarm, attr, module.params.get(attr))
|
||||
#this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
|
||||
comparison = alarm.comparison
|
||||
comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
|
||||
alarm.comparison = comparisons[comparison]
|
||||
|
||||
dim1 = module.params.get('dimensions')
|
||||
dim2 = alarm.dimensions
|
||||
|
||||
for keys in dim1:
|
||||
if not isinstance(dim1[keys], list):
|
||||
dim1[keys] = [dim1[keys]]
|
||||
if dim1[keys] != dim2[keys]:
|
||||
changed=True
|
||||
setattr(alarm, 'dimensions', dim1)
|
||||
|
||||
for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
|
||||
action = module.params.get(attr) or []
|
||||
if getattr(alarm, attr) != action:
|
||||
changed = True
|
||||
setattr(alarm, attr, module.params.get(attr))
|
||||
|
||||
try:
|
||||
if changed:
|
||||
connection.create_alarm(alarm)
|
||||
module.exit_json(changed=changed)
|
||||
except BotoServerError, e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def delete_metric_alarm(connection, module):
|
||||
name = module.params.get('name')
|
||||
|
||||
alarms = connection.describe_alarms(alarm_names=[name])
|
||||
|
||||
if alarms:
|
||||
try:
|
||||
connection.delete_alarms([name])
|
||||
module.exit_json(changed=True)
|
||||
except BotoServerError, e:
|
||||
module.fail_json(msg=str(e))
|
||||
else:
|
||||
module.exit_json(changed=False)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
name=dict(required=True, type='str'),
|
||||
metric=dict(type='str'),
|
||||
namespace=dict(type='str', choices=['AWS/AutoScaling', 'AWS/Billing', 'AWS/DynamoDB', 'AWS/ElastiCache', 'AWS/EBS', 'AWS/EC2',
|
||||
'AWS/ELB', 'AWS/ElasticMapReduce', 'AWS/OpsWorks', 'AWS/Redshift', 'AWS/RDS', 'AWS/Route53', 'AWS/SNS', 'AWS/SQS', 'AWS/StorageGateway']), statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
|
||||
comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
|
||||
threshold=dict(type='float'),
|
||||
period=dict(type='int'),
|
||||
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
|
||||
evaluation_periods=dict(type='int'),
|
||||
description=dict(type='str'),
|
||||
dimensions=dict(type='dict'),
|
||||
alarm_actions=dict(type='list'),
|
||||
insufficient_data_actions=dict(type='list'),
|
||||
ok_actions=dict(type='list'),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
region=dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
state = module.params.get('state')
|
||||
|
||||
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
|
||||
try:
|
||||
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
|
||||
except boto.exception.NoAuthHandlerFound, e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
if state == 'present':
|
||||
create_metric_alarm(connection, module)
|
||||
elif state == 'absent':
|
||||
delete_metric_alarm(connection, module)
|
||||
|
||||
main()
|
180
cloud/ec2_scaling_policy
Executable file
180
cloud/ec2_scaling_policy
Executable file
|
@ -0,0 +1,180 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
DOCUMENTATION = """
|
||||
module: ec2_scaling_policy
|
||||
short_description: Create or delete AWS scaling policies for Autoscaling groups
|
||||
description:
|
||||
- Can create or delete scaling policies for autoscaling groups
|
||||
- Referenced autoscaling groups must already exist
|
||||
version_added: "1.6"
|
||||
author: Zacharie Eakin
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- register or deregister the policy
|
||||
required: true
|
||||
choices: ['present', 'absent']
|
||||
name:
|
||||
description:
|
||||
- Unique name for the scaling policy
|
||||
required: true
|
||||
asg_name:
|
||||
description:
|
||||
- Name of the associated autoscaling group
|
||||
required: true
|
||||
adjustment_type:
|
||||
description:
|
||||
- The type of change in capacity of the autoscaling group
|
||||
required: false
|
||||
choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']
|
||||
scaling_adjustment:
|
||||
description:
|
||||
- The amount by which the autoscaling group is adjusted by the policy
|
||||
required: false
|
||||
min_adjustment_step:
|
||||
description:
|
||||
- Minimum amount of adjustment when policy is triggered
|
||||
required: false
|
||||
cooldown:
|
||||
description:
|
||||
- The minimum period of time between which autoscaling actions can take place
|
||||
required: false
|
||||
extends_documentation_fragment: aws
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
- ec2_scaling_policy:
|
||||
state: present
|
||||
region: US-XXX
|
||||
name: "scaledown-policy"
|
||||
adjustment_type: "ChangeInCapacity"
|
||||
asg_name: "slave-pool"
|
||||
scaling_adjustment: -1
|
||||
min_adjustment_step: 1
|
||||
cooldown: 300
|
||||
'''
|
||||
|
||||
|
||||
import sys
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
try:
|
||||
import boto.ec2.autoscale
|
||||
from boto.ec2.autoscale import ScalingPolicy
|
||||
from boto.exception import BotoServerError
|
||||
|
||||
except ImportError:
|
||||
print "failed=True msg='boto required for this module'"
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def create_scaling_policy(connection, module):
|
||||
sp_name = module.params.get('name')
|
||||
adjustment_type = module.params.get('adjustment_type')
|
||||
asg_name = module.params.get('asg_name')
|
||||
scaling_adjustment = module.params.get('scaling_adjustment')
|
||||
min_adjustment_step = module.params.get('min_adjustment_step')
|
||||
cooldown = module.params.get('cooldown')
|
||||
|
||||
scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
|
||||
|
||||
if not scalingPolicies:
|
||||
sp = ScalingPolicy(
|
||||
name=sp_name,
|
||||
adjustment_type=adjustment_type,
|
||||
as_name=asg_name,
|
||||
scaling_adjustment=scaling_adjustment,
|
||||
min_adjustment_step=min_adjustment_step,
|
||||
cooldown=cooldown)
|
||||
|
||||
try:
|
||||
connection.create_scaling_policy(sp)
|
||||
module.exit_json(changed=True)
|
||||
except BotoServerError, e:
|
||||
module.fail_json(msg=str(e))
|
||||
else:
|
||||
policy = scalingPolicies[0]
|
||||
changed = False
|
||||
|
||||
#min_adjustment_step attribute is only relevant if the adjustment_type
|
||||
#is set to percentage change in capacity, so it is a special case
|
||||
if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity':
|
||||
if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'):
|
||||
changed = True
|
||||
|
||||
#set the min adjustment step incase the user decided to change their adjustment type to percentage
|
||||
setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))
|
||||
|
||||
#check the remaining attributes
|
||||
for attr in ('adjustment_type','scaling_adjustment','cooldown'):
|
||||
if getattr(policy, attr) != module.params.get(attr):
|
||||
changed = True
|
||||
setattr(policy, attr, module.params.get(attr))
|
||||
|
||||
try:
|
||||
if changed:
|
||||
connection.create_scaling_policy(policy)
|
||||
policy = connection.get_all_policies(policy_names=[sp_name])[0]
|
||||
module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
|
||||
module.exit_json(changed=changed)
|
||||
except BotoServerError, e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def delete_scaling_policy(connection, module):
|
||||
sp_name = module.params.get('name')
|
||||
asg_name = module.params.get('asg_name')
|
||||
|
||||
scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
|
||||
|
||||
if scalingPolicies:
|
||||
try:
|
||||
connection.delete_policy(sp_name, asg_name)
|
||||
module.exit_json(changed=True)
|
||||
except BotoServerError, e:
|
||||
module.exit_json(changed=False, msg=str(e))
|
||||
else:
|
||||
module.exit_json(changed=False)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
name = dict(required=True, type='str'),
|
||||
adjustment_type = dict(type='str', choices=['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']),
|
||||
asg_name = dict(required=True, type='str'),
|
||||
scaling_adjustment = dict(type='int'),
|
||||
min_adjustment_step = dict(type='int'),
|
||||
cooldown = dict(type='int'),
|
||||
region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
|
||||
|
||||
state = module.params.get('state')
|
||||
|
||||
try:
|
||||
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
|
||||
except boto.exception.NoAuthHandlerFound, e:
|
||||
module.fail_json(msg = str(e))
|
||||
|
||||
if state == 'present':
|
||||
create_scaling_policy(connection, module)
|
||||
elif state == 'absent':
|
||||
delete_scaling_policy(connection, module)
|
||||
|
||||
|
||||
main()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
@ -22,24 +22,6 @@ description:
|
|||
- creates an EC2 snapshot from an existing EBS volume
|
||||
version_added: "1.5"
|
||||
options:
|
||||
ec2_secret_key:
|
||||
description:
|
||||
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
|
||||
required: false
|
||||
default: None
|
||||
aliases: ['aws_secret_key', 'secret_key' ]
|
||||
ec2_access_key:
|
||||
description:
|
||||
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
|
||||
required: false
|
||||
default: None
|
||||
aliases: ['aws_access_key', 'access_key' ]
|
||||
ec2_url:
|
||||
description:
|
||||
- Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
region:
|
||||
description:
|
||||
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
|
||||
|
@ -59,19 +41,20 @@ options:
|
|||
default: null
|
||||
aliases: []
|
||||
instance_id:
|
||||
description:
|
||||
- instance that has a the required volume to snapshot mounted
|
||||
description:
|
||||
- instance that has the required volume to snapshot mounted
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
device_name:
|
||||
description:
|
||||
description:
|
||||
- device name of a mounted volume to be snapshotted
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
requirements: [ "boto" ]
|
||||
|
||||
author: Will Thames
|
||||
extends_documentation_fragment: aws
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -109,6 +92,9 @@ def main():
|
|||
ec2_url = dict(),
|
||||
ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True),
|
||||
ec2_access_key = dict(aliases=['aws_access_key', 'access_key']),
|
||||
wait = dict(type='bool', default='true'),
|
||||
wait_timeout = dict(default=0),
|
||||
snapshot_tags = dict(type='dict', default=dict()),
|
||||
)
|
||||
)
|
||||
|
||||
|
@ -116,6 +102,9 @@ def main():
|
|||
description = module.params.get('description')
|
||||
instance_id = module.params.get('instance_id')
|
||||
device_name = module.params.get('device_name')
|
||||
wait = module.params.get('wait')
|
||||
wait_timeout = module.params.get('wait_timeout')
|
||||
snapshot_tags = module.params.get('snapshot_tags')
|
||||
|
||||
if not volume_id and not instance_id or volume_id and instance_id:
|
||||
module.fail_json('One and only one of volume_id or instance_id must be specified')
|
||||
|
@ -135,10 +124,22 @@ def main():
|
|||
|
||||
try:
|
||||
snapshot = ec2.create_snapshot(volume_id, description=description)
|
||||
time_waited = 0
|
||||
if wait:
|
||||
snapshot.update()
|
||||
while snapshot.status != 'completed':
|
||||
time.sleep(3)
|
||||
snapshot.update()
|
||||
time_waited += 3
|
||||
if wait_timeout and time_waited > wait_timeout:
|
||||
module.fail_json('Timed out while creating snapshot.')
|
||||
for k, v in snapshot_tags.items():
|
||||
snapshot.add_tag(k, v)
|
||||
except boto.exception.BotoServerError, e:
|
||||
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
|
||||
|
||||
module.exit_json(changed=True, snapshot_id=snapshot.id)
|
||||
module.exit_json(changed=True, snapshot_id=snapshot.id, volume_id=snapshot.volume_id,
|
||||
volume_size=snapshot.volume_size, tags=snapshot.tags.copy())
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
|
|
@ -19,7 +19,7 @@ DOCUMENTATION = '''
|
|||
module: ec2_tag
|
||||
short_description: create and remove tag(s) to ec2 resources.
|
||||
description:
|
||||
- Creates and removes tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
|
||||
- Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
|
||||
version_added: "1.3"
|
||||
options:
|
||||
resource:
|
||||
|
@ -30,7 +30,7 @@ options:
|
|||
aliases: []
|
||||
state:
|
||||
description:
|
||||
- Whether the tags should be present or absent on the resource.
|
||||
- Whether the tags should be present or absent on the resource. Use list to interrogate the tags of an instance.
|
||||
required: false
|
||||
default: present
|
||||
choices: ['present', 'absent']
|
||||
|
@ -41,35 +41,9 @@ options:
|
|||
required: false
|
||||
default: null
|
||||
aliases: ['aws_region', 'ec2_region']
|
||||
aws_secret_key:
|
||||
description:
|
||||
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
|
||||
required: false
|
||||
default: None
|
||||
aliases: ['ec2_secret_key', 'secret_key' ]
|
||||
aws_access_key:
|
||||
description:
|
||||
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
|
||||
required: false
|
||||
default: None
|
||||
aliases: ['ec2_access_key', 'access_key' ]
|
||||
ec2_url:
|
||||
description:
|
||||
- Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used.
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
validate_certs:
|
||||
description:
|
||||
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
|
||||
required: false
|
||||
default: "yes"
|
||||
choices: ["yes", "no"]
|
||||
aliases: []
|
||||
version_added: "1.5"
|
||||
|
||||
requirements: [ "boto" ]
|
||||
author: Lester Wade
|
||||
extends_documentation_fragment: aws
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -115,14 +89,14 @@ def main():
|
|||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
resource = dict(required=True),
|
||||
tags = dict(required=True),
|
||||
state = dict(default='present', choices=['present', 'absent']),
|
||||
tags = dict(),
|
||||
state = dict(default='present', choices=['present', 'absent', 'list']),
|
||||
)
|
||||
)
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
resource = module.params.get('resource')
|
||||
tags = module.params['tags']
|
||||
tags = module.params.get('tags')
|
||||
state = module.params.get('state')
|
||||
|
||||
ec2 = ec2_connect(module)
|
||||
|
@ -140,6 +114,8 @@ def main():
|
|||
tagdict[tag.name] = tag.value
|
||||
|
||||
if state == 'present':
|
||||
if not tags:
|
||||
module.fail_json(msg="tags argument is required when state is present")
|
||||
if set(tags.items()).issubset(set(tagdict.items())):
|
||||
module.exit_json(msg="Tags already exists in %s." %resource, changed=False)
|
||||
else:
|
||||
|
@ -151,6 +127,8 @@ def main():
|
|||
module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True)
|
||||
|
||||
if state == 'absent':
|
||||
if not tags:
|
||||
module.fail_json(msg="tags argument is required when state is absent")
|
||||
for (key, value) in set(tags.items()):
|
||||
if (key, value) not in set(tagdict.items()):
|
||||
baddict[key] = value
|
||||
|
@ -162,10 +140,9 @@ def main():
|
|||
tagger = ec2.delete_tags(resource, dictremove)
|
||||
gettags = ec2.get_all_tags(filters=filters)
|
||||
module.exit_json(msg="Tags %s removed for resource %s." % (dictremove,resource), changed=True)
|
||||
|
||||
# print json.dumps({
|
||||
# "current_resource_tags": gettags,
|
||||
# })
|
||||
|
||||
if state == 'list':
|
||||
module.exit_json(changed=False, **tagdict)
|
||||
sys.exit(0)
|
||||
|
||||
# import module snippets
|
||||
|
|
263
cloud/ec2_vol
263
cloud/ec2_vol
|
@ -22,34 +22,30 @@ description:
|
|||
- creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name is given and the instance has a device at the device name, then no volume is created and no attachment is made. This module has a dependency on python-boto.
|
||||
version_added: "1.1"
|
||||
options:
|
||||
aws_secret_key:
|
||||
description:
|
||||
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
|
||||
required: false
|
||||
default: None
|
||||
aliases: ['ec2_secret_key', 'secret_key' ]
|
||||
aws_access_key:
|
||||
description:
|
||||
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
|
||||
required: false
|
||||
default: None
|
||||
aliases: ['ec2_access_key', 'access_key' ]
|
||||
ec2_url:
|
||||
description:
|
||||
- Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
instance:
|
||||
description:
|
||||
- instance ID if you wish to attach the volume.
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
name:
|
||||
description:
|
||||
- volume Name tag if you wish to attach an existing volume (requires instance)
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
version_added: "1.6"
|
||||
id:
|
||||
description:
|
||||
- volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
version_added: "1.6"
|
||||
volume_size:
|
||||
description:
|
||||
- size of volume (in GB) to create.
|
||||
required: true
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
iops:
|
||||
|
@ -82,6 +78,7 @@ options:
|
|||
- snapshot ID on which to base the volume
|
||||
required: false
|
||||
default: null
|
||||
version_added: "1.5"
|
||||
validate_certs:
|
||||
description:
|
||||
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
|
||||
|
@ -90,9 +87,15 @@ options:
|
|||
choices: ["yes", "no"]
|
||||
aliases: []
|
||||
version_added: "1.5"
|
||||
|
||||
requirements: [ "boto" ]
|
||||
state:
|
||||
description:
|
||||
- whether to ensure the volume is present or absent
|
||||
required: false
|
||||
default: present
|
||||
choices: ['absent', 'present']
|
||||
version_added: "1.6"
|
||||
author: Lester Wade
|
||||
extends_documentation_fragment: aws
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -131,6 +134,34 @@ EXAMPLES = '''
|
|||
volume_size: 5
|
||||
with_items: ec2.instances
|
||||
register: ec2_vol
|
||||
|
||||
# Example: Launch an instance and then add a volue if not already present
|
||||
# * Nothing will happen if the volume is already attached.
|
||||
# * Volume must exist in the same zone.
|
||||
|
||||
- local_action:
|
||||
module: ec2
|
||||
keypair: "{{ keypair }}"
|
||||
image: "{{ image }}"
|
||||
zone: YYYYYY
|
||||
id: my_instance
|
||||
wait: yes
|
||||
count: 1
|
||||
register: ec2
|
||||
|
||||
- local_action:
|
||||
module: ec2_vol
|
||||
instance: "{{ item.id }}"
|
||||
name: my_existing_volume_Name_tag
|
||||
device_name: /dev/xvdf
|
||||
with_items: ec2.instances
|
||||
register: ec2_vol
|
||||
|
||||
# Remove a volume
|
||||
- location: action
|
||||
module: ec2_vol
|
||||
id: vol-XXXXXXXX
|
||||
state: absent
|
||||
'''
|
||||
|
||||
# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes.
|
||||
|
@ -147,82 +178,104 @@ except ImportError:
|
|||
print "failed=True msg='boto required for this module'"
|
||||
sys.exit(1)
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
instance = dict(),
|
||||
volume_size = dict(required=True),
|
||||
iops = dict(),
|
||||
device_name = dict(),
|
||||
zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
|
||||
snapshot = dict(),
|
||||
)
|
||||
)
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
instance = module.params.get('instance')
|
||||
volume_size = module.params.get('volume_size')
|
||||
iops = module.params.get('iops')
|
||||
device_name = module.params.get('device_name')
|
||||
def get_volume(module, ec2):
|
||||
name = module.params.get('name')
|
||||
id = module.params.get('id')
|
||||
zone = module.params.get('zone')
|
||||
filters = {}
|
||||
volume_ids = None
|
||||
if zone:
|
||||
filters['availability_zone'] = zone
|
||||
if name:
|
||||
filters = {'tag:Name': name}
|
||||
if id:
|
||||
volume_ids = [id]
|
||||
try:
|
||||
vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
|
||||
except boto.exception.BotoServerError, e:
|
||||
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
|
||||
|
||||
if not vols:
|
||||
module.fail_json(msg="Could not find volume in zone (if specified): %s" % name or id)
|
||||
if len(vols) > 1:
|
||||
module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
|
||||
return vols[0]
|
||||
|
||||
|
||||
def delete_volume(module, ec2):
|
||||
vol = get_volume(module, ec2)
|
||||
if not vol:
|
||||
module.exit_json(changed=False)
|
||||
else:
|
||||
if vol.attachment_state() is not None:
|
||||
adata = vol.attach_data
|
||||
module.fail_json(msg="Volume %s is attached to an instance %s." % (vol.id, adata.instance_id))
|
||||
ec2.delete_volume(vol.id)
|
||||
module.exit_json(changed=True)
|
||||
|
||||
|
||||
def create_volume(module, ec2, zone):
|
||||
name = module.params.get('name')
|
||||
id = module.params.get('id')
|
||||
instance = module.params.get('instance')
|
||||
iops = module.params.get('iops')
|
||||
volume_size = module.params.get('volume_size')
|
||||
snapshot = module.params.get('snapshot')
|
||||
|
||||
ec2 = ec2_connect(module)
|
||||
|
||||
# Here we need to get the zone info for the instance. This covers situation where
|
||||
# instance is specified but zone isn't.
|
||||
# Useful for playbooks chaining instance launch with volume create + attach and where the
|
||||
# zone doesn't matter to the user.
|
||||
|
||||
if instance:
|
||||
reservation = ec2.get_all_instances(instance_ids=instance)
|
||||
inst = reservation[0].instances[0]
|
||||
zone = inst.placement
|
||||
|
||||
# Check if there is a volume already mounted there.
|
||||
if device_name:
|
||||
if device_name in inst.block_device_mapping:
|
||||
module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
|
||||
volume_id=inst.block_device_mapping[device_name].volume_id,
|
||||
device=device_name,
|
||||
changed=False)
|
||||
|
||||
# If custom iops is defined we use volume_type "io1" rather than the default of "standard"
|
||||
|
||||
if iops:
|
||||
volume_type = 'io1'
|
||||
else:
|
||||
volume_type = 'standard'
|
||||
|
||||
# If no instance supplied, try volume creation based on module parameters.
|
||||
if name or id:
|
||||
if not instance:
|
||||
module.fail_json(msg = "If name or id is specified, instance must also be specified")
|
||||
if iops or volume_size:
|
||||
module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]")
|
||||
|
||||
try:
|
||||
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
|
||||
while volume.status != 'available':
|
||||
time.sleep(3)
|
||||
volume.update()
|
||||
except boto.exception.BotoServerError, e:
|
||||
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
|
||||
volume = get_volume(module, ec2)
|
||||
if volume.attachment_state() is not None:
|
||||
adata = volume.attach_data
|
||||
if adata.instance_id != instance:
|
||||
module.fail_json(msg = "Volume %s is already attached to another instance: %s"
|
||||
% (name or id, adata.instance_id))
|
||||
else:
|
||||
module.exit_json(msg="Volume %s is already mapped on instance %s: %s" %
|
||||
(name or id, adata.instance_id, adata.device),
|
||||
volume_id=id,
|
||||
device=adata.device,
|
||||
changed=False)
|
||||
else:
|
||||
try:
|
||||
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
|
||||
while volume.status != 'available':
|
||||
time.sleep(3)
|
||||
volume.update()
|
||||
except boto.exception.BotoServerError, e:
|
||||
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
|
||||
return volume
|
||||
|
||||
# Attach the created volume.
|
||||
|
||||
def attach_volume(module, ec2, volume, instance):
|
||||
device_name = module.params.get('device_name')
|
||||
|
||||
if device_name and instance:
|
||||
try:
|
||||
attach = volume.attach(inst.id, device_name)
|
||||
attach = volume.attach(instance.id, device_name)
|
||||
while volume.attachment_state() != 'attached':
|
||||
time.sleep(3)
|
||||
volume.update()
|
||||
except boto.exception.BotoServerError, e:
|
||||
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
|
||||
|
||||
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
|
||||
|
||||
# If device_name isn't set, make a choice based on best practices here:
|
||||
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
|
||||
|
||||
|
||||
# In future this needs to be more dynamic but combining block device mapping best practices
|
||||
# (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
|
||||
|
||||
# Use password data attribute to tell whether the instance is Windows or Linux
|
||||
|
||||
if device_name is None and instance:
|
||||
try:
|
||||
if not ec2.get_password_data(inst.id):
|
||||
|
@ -240,11 +293,65 @@ def main():
|
|||
except boto.exception.BotoServerError, e:
|
||||
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
|
||||
|
||||
print json.dumps({
|
||||
"volume_id": volume.id,
|
||||
"device": device_name
|
||||
})
|
||||
sys.exit(0)
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
instance = dict(),
|
||||
id = dict(),
|
||||
name = dict(),
|
||||
volume_size = dict(),
|
||||
iops = dict(),
|
||||
device_name = dict(),
|
||||
zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
|
||||
snapshot = dict(),
|
||||
state = dict(choices=['absent', 'present'], default='present')
|
||||
)
|
||||
)
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
id = module.params.get('id')
|
||||
name = module.params.get('name')
|
||||
instance = module.params.get('instance')
|
||||
volume_size = module.params.get('volume_size')
|
||||
iops = module.params.get('iops')
|
||||
device_name = module.params.get('device_name')
|
||||
zone = module.params.get('zone')
|
||||
snapshot = module.params.get('snapshot')
|
||||
state = module.params.get('state')
|
||||
|
||||
ec2 = ec2_connect(module)
|
||||
|
||||
if id and name:
|
||||
module.fail_json(msg="Both id and name cannot be specified")
|
||||
|
||||
if not (id or name or volume_size):
|
||||
module.fail_json(msg="Cannot specify volume_size and either one of name or id")
|
||||
|
||||
# Here we need to get the zone info for the instance. This covers situation where
|
||||
# instance is specified but zone isn't.
|
||||
# Useful for playbooks chaining instance launch with volume create + attach and where the
|
||||
# zone doesn't matter to the user.
|
||||
if instance:
|
||||
reservation = ec2.get_all_instances(instance_ids=instance)
|
||||
inst = reservation[0].instances[0]
|
||||
zone = inst.placement
|
||||
|
||||
# Check if there is a volume already mounted there.
|
||||
if device_name:
|
||||
if device_name in inst.block_device_mapping:
|
||||
module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
|
||||
volume_id=inst.block_device_mapping[device_name].volume_id,
|
||||
device=device_name,
|
||||
changed=False)
|
||||
|
||||
if state == 'absent':
|
||||
delete_volume(module, ec2)
|
||||
else:
|
||||
volume = create_volume(module, ec2, zone)
|
||||
if instance:
|
||||
attach_volume(module, ec2, volume, inst)
|
||||
module.exit_json(volume_id=volume.id, device=device_name)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
|
154
cloud/ec2_vpc
154
cloud/ec2_vpc
|
@ -46,7 +46,7 @@ options:
|
|||
choices: [ "yes", "no" ]
|
||||
subnets:
|
||||
description:
|
||||
- "A dictionary array of subnets to add of the form: { cidr: ..., az: ... }. Where az is the desired availability zone of the subnet, but it is not required. All VPC subnets not in this list will be removed."
|
||||
- 'A dictionary array of subnets to add of the form: { cidr: ..., az: ... , resource_tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: resource_tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed.'
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
|
@ -56,6 +56,13 @@ options:
|
|||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
resource_tags:
|
||||
description:
|
||||
- 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exits, a new VPC will be created. VPC tags not on this list will be ignored.'
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
version_added: "1.6"
|
||||
internet_gateway:
|
||||
description:
|
||||
- Toggle whether there should be an Internet gateway attached to the VPC
|
||||
|
@ -65,7 +72,7 @@ options:
|
|||
aliases: []
|
||||
route_tables:
|
||||
description:
|
||||
- "A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. This module is currently unable to affect the 'main' route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly."
|
||||
- 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly.'
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
|
@ -127,6 +134,7 @@ EXAMPLES = '''
|
|||
module: ec2_vpc
|
||||
state: present
|
||||
cidr_block: 172.23.0.0/16
|
||||
resource_tags: { "Environment":"Development" }
|
||||
region: us-west-2
|
||||
# Full creation example with subnets and optional availability zones.
|
||||
# The absence or presense of subnets deletes or creates them respectively.
|
||||
|
@ -134,13 +142,17 @@ EXAMPLES = '''
|
|||
module: ec2_vpc
|
||||
state: present
|
||||
cidr_block: 172.22.0.0/16
|
||||
resource_tags: { "Environment":"Development" }
|
||||
subnets:
|
||||
- cidr: 172.22.1.0/24
|
||||
az: us-west-2c
|
||||
resource_tags: { "Environment":"Dev", "Tier" : "Web" }
|
||||
- cidr: 172.22.2.0/24
|
||||
az: us-west-2b
|
||||
resource_tags: { "Environment":"Dev", "Tier" : "App" }
|
||||
- cidr: 172.22.3.0/24
|
||||
az: us-west-2a
|
||||
resource_tags: { "Environment":"Dev", "Tier" : "DB" }
|
||||
internet_gateway: True
|
||||
route_tables:
|
||||
- subnets:
|
||||
|
@ -193,9 +205,54 @@ def get_vpc_info(vpc):
|
|||
'state': vpc.state,
|
||||
})
|
||||
|
||||
def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
|
||||
"""
|
||||
Finds a VPC that matches a specific id or cidr + tags
|
||||
|
||||
module : AnsibleModule object
|
||||
vpc_conn: authenticated VPCConnection connection object
|
||||
|
||||
Returns:
|
||||
A VPC object that matches either an ID or CIDR and one or more tag values
|
||||
"""
|
||||
|
||||
if vpc_id == None and cidr == None:
|
||||
module.fail_json(
|
||||
msg='You must specify either a vpc id or a cidr block + list of unique tags, aborting'
|
||||
)
|
||||
|
||||
found_vpcs = []
|
||||
|
||||
resource_tags = module.params.get('resource_tags')
|
||||
|
||||
# Check for existing VPC by cidr_block or id
|
||||
if vpc_id is not None:
|
||||
found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available',})
|
||||
|
||||
else:
|
||||
previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'})
|
||||
|
||||
for vpc in previous_vpcs:
|
||||
# Get all tags for each of the found VPCs
|
||||
vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
|
||||
|
||||
# If the supplied list of ID Tags match a subset of the VPC Tags, we found our VPC
|
||||
if resource_tags and set(resource_tags.items()).issubset(set(vpc_tags.items())):
|
||||
found_vpcs.append(vpc)
|
||||
|
||||
found_vpc = None
|
||||
|
||||
if len(found_vpcs) == 1:
|
||||
found_vpc = found_vpcs[0]
|
||||
|
||||
if len(found_vpcs) > 1:
|
||||
module.fail_json(msg='Found more than one vpc based on the supplied criteria, aborting')
|
||||
|
||||
return (found_vpc)
|
||||
|
||||
def create_vpc(module, vpc_conn):
|
||||
"""
|
||||
Creates a new VPC
|
||||
Creates a new or modifies an existing VPC.
|
||||
|
||||
module : AnsibleModule object
|
||||
vpc_conn: authenticated VPCConnection connection object
|
||||
|
@ -217,20 +274,12 @@ def create_vpc(module, vpc_conn):
|
|||
wait_timeout = int(module.params.get('wait_timeout'))
|
||||
changed = False
|
||||
|
||||
# Check for existing VPC by cidr_block or id
|
||||
if id != None:
|
||||
filter_dict = {'vpc-id':id, 'state': 'available',}
|
||||
previous_vpcs = vpc_conn.get_all_vpcs(None, filter_dict)
|
||||
else:
|
||||
filter_dict = {'cidr': cidr_block, 'state': 'available'}
|
||||
previous_vpcs = vpc_conn.get_all_vpcs(None, filter_dict)
|
||||
# Check for existing VPC by cidr_block + tags or id
|
||||
previous_vpc = find_vpc(module, vpc_conn, id, cidr_block)
|
||||
|
||||
if len(previous_vpcs) > 1:
|
||||
module.fail_json(msg='EC2 returned more than one VPC, aborting')
|
||||
|
||||
if len(previous_vpcs) == 1:
|
||||
if previous_vpc is not None:
|
||||
changed = False
|
||||
vpc = previous_vpcs[0]
|
||||
vpc = previous_vpc
|
||||
else:
|
||||
changed = True
|
||||
try:
|
||||
|
@ -255,7 +304,21 @@ def create_vpc(module, vpc_conn):
|
|||
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
|
||||
|
||||
# Done with base VPC, now change to attributes and features.
|
||||
|
||||
|
||||
# Add resource tags
|
||||
vpc_spec_tags = module.params.get('resource_tags')
|
||||
vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
|
||||
|
||||
if vpc_spec_tags and not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())):
|
||||
new_tags = {}
|
||||
|
||||
for (key, value) in set(vpc_spec_tags.items()):
|
||||
if (key, value) not in set(vpc_tags.items()):
|
||||
new_tags[key] = value
|
||||
|
||||
if new_tags:
|
||||
vpc_conn.create_tags(vpc.id, new_tags)
|
||||
|
||||
|
||||
# boto doesn't appear to have a way to determine the existing
|
||||
# value of the dns attributes, so we just set them.
|
||||
|
@ -269,6 +332,7 @@ def create_vpc(module, vpc_conn):
|
|||
module.fail_json(msg='subnets needs to be a list of cidr blocks')
|
||||
|
||||
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
|
||||
|
||||
# First add all new subnets
|
||||
for subnet in subnets:
|
||||
add_subnet = True
|
||||
|
@ -277,10 +341,22 @@ def create_vpc(module, vpc_conn):
|
|||
add_subnet = False
|
||||
if add_subnet:
|
||||
try:
|
||||
vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None))
|
||||
new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None))
|
||||
new_subnet_tags = subnet.get('resource_tags', None)
|
||||
if new_subnet_tags:
|
||||
# Sometimes AWS takes its time to create a subnet and so using new subnets's id
|
||||
# to create tags results in exception.
|
||||
# boto doesn't seem to refresh 'state' of the newly created subnet, i.e.: it's always 'pending'
|
||||
# so i resorted to polling vpc_conn.get_all_subnets with the id of the newly added subnet
|
||||
while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0:
|
||||
time.sleep(0.1)
|
||||
|
||||
vpc_conn.create_tags(new_subnet.id, new_subnet_tags)
|
||||
|
||||
changed = True
|
||||
except EC2ResponseError, e:
|
||||
module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e))
|
||||
|
||||
# Now delete all absent subnets
|
||||
for csubnet in current_subnets:
|
||||
delete_subnet = True
|
||||
|
@ -332,7 +408,7 @@ def create_vpc(module, vpc_conn):
|
|||
if not isinstance(route_tables, list):
|
||||
module.fail_json(msg='route tables need to be a list of dictionaries')
|
||||
|
||||
# Work through each route table and update/create to match dictionary array
|
||||
# Work through each route table and update/create to match dictionary array
|
||||
all_route_tables = []
|
||||
for rt in route_tables:
|
||||
try:
|
||||
|
@ -350,7 +426,7 @@ def create_vpc(module, vpc_conn):
|
|||
|
||||
# Associate with subnets
|
||||
for sn in rt['subnets']:
|
||||
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn})
|
||||
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
|
||||
if len(rsn) != 1:
|
||||
module.fail_json(
|
||||
msg='The subnet {0} to associate with route_table {1} ' \
|
||||
|
@ -360,7 +436,7 @@ def create_vpc(module, vpc_conn):
|
|||
|
||||
# Disassociate then associate since we don't have replace
|
||||
old_rt = vpc_conn.get_all_route_tables(
|
||||
filters={'association.subnet_id': rsn.id}
|
||||
filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id}
|
||||
)
|
||||
if len(old_rt) == 1:
|
||||
old_rt = old_rt[0]
|
||||
|
@ -405,14 +481,15 @@ def create_vpc(module, vpc_conn):
|
|||
created_vpc_id = vpc.id
|
||||
returned_subnets = []
|
||||
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
|
||||
|
||||
for sn in current_subnets:
|
||||
returned_subnets.append({
|
||||
'resource_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})),
|
||||
'cidr': sn.cidr_block,
|
||||
'az': sn.availability_zone,
|
||||
'id': sn.id,
|
||||
})
|
||||
|
||||
|
||||
return (vpc_dict, created_vpc_id, returned_subnets, changed)
|
||||
|
||||
def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
|
||||
|
@ -434,23 +511,10 @@ def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
|
|||
vpc_dict = {}
|
||||
terminated_vpc_id = ''
|
||||
changed = False
|
||||
|
||||
if vpc_id == None and cidr == None:
|
||||
module.fail_json(
|
||||
msg='You must either specify a vpc id or a cidr '\
|
||||
'block to terminate a VPC, aborting'
|
||||
)
|
||||
if vpc_id is not None:
|
||||
vpc_rs = vpc_conn.get_all_vpcs(vpc_id)
|
||||
else:
|
||||
vpc_rs = vpc_conn.get_all_vpcs(filters={'cidr': cidr})
|
||||
if len(vpc_rs) > 1:
|
||||
module.fail_json(
|
||||
msg='EC2 returned more than one VPC for id {0} ' \
|
||||
'or cidr {1}, aborting'.format(vpc_id,vidr)
|
||||
)
|
||||
if len(vpc_rs) == 1:
|
||||
vpc = vpc_rs[0]
|
||||
|
||||
vpc = find_vpc(module, vpc_conn, vpc_id, cidr)
|
||||
|
||||
if vpc is not None:
|
||||
if vpc.state == 'available':
|
||||
terminated_vpc_id=vpc.id
|
||||
vpc_dict=get_vpc_info(vpc)
|
||||
|
@ -491,13 +555,14 @@ def main():
|
|||
argument_spec.update(dict(
|
||||
cidr_block = dict(),
|
||||
instance_tenancy = dict(choices=['default', 'dedicated'], default='default'),
|
||||
wait = dict(choices=BOOLEANS, default=False),
|
||||
wait = dict(type='bool', default=False),
|
||||
wait_timeout = dict(default=300),
|
||||
dns_support = dict(choices=BOOLEANS, default=True),
|
||||
dns_hostnames = dict(choices=BOOLEANS, default=True),
|
||||
dns_support = dict(type='bool', default=True),
|
||||
dns_hostnames = dict(type='bool', default=True),
|
||||
subnets = dict(type='list'),
|
||||
vpc_id = dict(),
|
||||
internet_gateway = dict(choices=BOOLEANS, default=False),
|
||||
internet_gateway = dict(type='bool', default=False),
|
||||
resource_tags = dict(type='dict'),
|
||||
route_tables = dict(type='list'),
|
||||
state = dict(choices=['present', 'absent'], default='present'),
|
||||
)
|
||||
|
@ -527,11 +592,6 @@ def main():
|
|||
if module.params.get('state') == 'absent':
|
||||
vpc_id = module.params.get('vpc_id')
|
||||
cidr = module.params.get('cidr_block')
|
||||
if vpc_id == None and cidr == None:
|
||||
module.fail_json(
|
||||
msg='You must either specify a vpc id or a cidr '\
|
||||
'block to terminate a VPC, aborting'
|
||||
)
|
||||
(changed, vpc_dict, new_vpc_id) = terminate_vpc(module, vpc_conn, vpc_id, cidr)
|
||||
subnets_changed = None
|
||||
elif module.params.get('state') == 'present':
|
||||
|
|
|
@ -58,6 +58,12 @@ options:
|
|||
- The port number on which each of the cache nodes will accept connections
|
||||
required: false
|
||||
default: 11211
|
||||
security_group_ids:
|
||||
description:
|
||||
- A list of vpc security group names to associate with this cache cluster. Only use if inside a vpc
|
||||
required: false
|
||||
default: ['default']
|
||||
version_added: "1.6"
|
||||
cache_security_groups:
|
||||
description:
|
||||
- A list of cache security group names to associate with this cache cluster
|
||||
|
@ -152,7 +158,7 @@ class ElastiCacheManager(object):
|
|||
EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
|
||||
|
||||
def __init__(self, module, name, engine, cache_engine_version, node_type,
|
||||
num_nodes, cache_port, cache_security_groups, zone, wait,
|
||||
num_nodes, cache_port, cache_security_groups, security_group_ids, zone, wait,
|
||||
hard_modify, aws_access_key, aws_secret_key, region):
|
||||
self.module = module
|
||||
self.name = name
|
||||
|
@ -162,6 +168,7 @@ class ElastiCacheManager(object):
|
|||
self.num_nodes = num_nodes
|
||||
self.cache_port = cache_port
|
||||
self.cache_security_groups = cache_security_groups
|
||||
self.security_group_ids = security_group_ids
|
||||
self.zone = zone
|
||||
self.wait = wait
|
||||
self.hard_modify = hard_modify
|
||||
|
@ -217,6 +224,7 @@ class ElastiCacheManager(object):
|
|||
engine=self.engine,
|
||||
engine_version=self.cache_engine_version,
|
||||
cache_security_group_names=self.cache_security_groups,
|
||||
security_group_ids=self.security_group_ids,
|
||||
preferred_availability_zone=self.zone,
|
||||
port=self.cache_port)
|
||||
except boto.exception.BotoServerError, e:
|
||||
|
@ -291,6 +299,7 @@ class ElastiCacheManager(object):
|
|||
num_cache_nodes=self.num_nodes,
|
||||
cache_node_ids_to_remove=nodes_to_remove,
|
||||
cache_security_group_names=self.cache_security_groups,
|
||||
security_group_ids=self.security_group_ids,
|
||||
apply_immediately=True,
|
||||
engine_version=self.cache_engine_version)
|
||||
except boto.exception.BotoServerError, e:
|
||||
|
@ -377,12 +386,21 @@ class ElastiCacheManager(object):
|
|||
if self.data[key] != value:
|
||||
return True
|
||||
|
||||
# Check security groups
|
||||
# Check cache security groups
|
||||
cache_security_groups = []
|
||||
for sg in self.data['CacheSecurityGroups']:
|
||||
cache_security_groups.append(sg['CacheSecurityGroupName'])
|
||||
if set(cache_security_groups) - set(self.cache_security_groups):
|
||||
return True
|
||||
|
||||
# check vpc security groups
|
||||
vpc_security_groups = []
|
||||
security_groups = self.data['SecurityGroups'] or []
|
||||
for sg in security_groups:
|
||||
vpc_security_groups.append(sg['SecurityGroupId'])
|
||||
if set(vpc_security_groups) - set(self.security_group_ids):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def _requires_destroy_and_create(self):
|
||||
|
@ -469,9 +487,11 @@ def main():
|
|||
cache_port={'required': False, 'default': 11211, 'type': 'int'},
|
||||
cache_security_groups={'required': False, 'default': ['default'],
|
||||
'type': 'list'},
|
||||
security_group_ids={'required': False, 'default': [],
|
||||
'type': 'list'},
|
||||
zone={'required': False, 'default': None},
|
||||
wait={'required': False, 'choices': BOOLEANS, 'default': True},
|
||||
hard_modify={'required': False, 'choices': BOOLEANS, 'default': False}
|
||||
wait={'required': False, 'type' : 'bool', 'default': True},
|
||||
hard_modify={'required': False, 'type': 'bool', 'default': False}
|
||||
)
|
||||
)
|
||||
|
||||
|
@ -489,6 +509,7 @@ def main():
|
|||
num_nodes = module.params['num_nodes']
|
||||
cache_port = module.params['cache_port']
|
||||
cache_security_groups = module.params['cache_security_groups']
|
||||
security_group_ids = module.params['security_group_ids']
|
||||
zone = module.params['zone']
|
||||
wait = module.params['wait']
|
||||
hard_modify = module.params['hard_modify']
|
||||
|
@ -502,7 +523,8 @@ def main():
|
|||
elasticache_manager = ElastiCacheManager(module, name, engine,
|
||||
cache_engine_version, node_type,
|
||||
num_nodes, cache_port,
|
||||
cache_security_groups, zone, wait,
|
||||
cache_security_groups,
|
||||
security_group_ids, zone, wait,
|
||||
hard_modify, aws_access_key,
|
||||
aws_secret_key, region)
|
||||
|
||||
|
|
|
@ -152,11 +152,12 @@ def key_check(module, gs, bucket, obj):
|
|||
def keysum(module, gs, bucket, obj):
|
||||
bucket = gs.lookup(bucket)
|
||||
key_check = bucket.get_key(obj)
|
||||
if key_check:
|
||||
md5_remote = key_check.etag[1:-1]
|
||||
etag_multipart = md5_remote.find('-')!=-1 #Check for multipart, etag is not md5
|
||||
if etag_multipart is True:
|
||||
module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.")
|
||||
if not key_check:
|
||||
return None
|
||||
md5_remote = key_check.etag[1:-1]
|
||||
etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
|
||||
if etag_multipart is True:
|
||||
module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.")
|
||||
return md5_remote
|
||||
|
||||
def bucket_check(module, gs, bucket):
|
||||
|
|
|
@ -351,7 +351,7 @@ def main():
|
|||
metadata = dict(),
|
||||
name = dict(),
|
||||
network = dict(default='default'),
|
||||
persistent_boot_disk = dict(type='bool', choices=BOOLEANS, default=False),
|
||||
persistent_boot_disk = dict(type='bool', default=False),
|
||||
state = dict(choices=['active', 'present', 'absent', 'deleted'],
|
||||
default='present'),
|
||||
tags = dict(type='list'),
|
||||
|
|
|
@ -111,21 +111,21 @@ options:
|
|||
choices: ["active", "present", "absent", "deleted"]
|
||||
aliases: []
|
||||
service_account_email:
|
||||
version_added: 1.5.1
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- service account email
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
pem_file:
|
||||
version_added: 1.5.1
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- path to the pem file associated with the service account email
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
project_id:
|
||||
version_added: 1.5.1
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- your GCE project ID
|
||||
required: false
|
||||
|
|
|
@ -74,21 +74,21 @@ options:
|
|||
choices: ["active", "present", "absent", "deleted"]
|
||||
aliases: []
|
||||
service_account_email:
|
||||
version_added: 1.5.1
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- service account email
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
pem_file:
|
||||
version_added: 1.5.1
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- path to the pem file associated with the service account email
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
project_id:
|
||||
version_added: 1.5.1
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- your GCE project ID
|
||||
required: false
|
||||
|
|
11
cloud/gce_pd
11
cloud/gce_pd
|
@ -76,21 +76,21 @@ options:
|
|||
default: "us-central1-b"
|
||||
aliases: []
|
||||
service_account_email:
|
||||
version_added: 1.5.1
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- service account email
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
pem_file:
|
||||
version_added: 1.5.1
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- path to the pem file associated with the service account email
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
project_id:
|
||||
version_added: 1.5.1
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- your GCE project ID
|
||||
required: false
|
||||
|
@ -127,10 +127,9 @@ except ImportError:
|
|||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
detach_only = dict(choice=BOOLEANS),
|
||||
detach_only = dict(type='bool'),
|
||||
instance_name = dict(),
|
||||
mode = dict(default='READ_ONLY',
|
||||
choices=['READ_WRITE', 'READ_ONLY']),
|
||||
mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
|
||||
name = dict(required=True),
|
||||
size_gb = dict(default=10),
|
||||
state = dict(default='present'),
|
||||
|
|
|
@ -26,6 +26,7 @@ options:
|
|||
- The tenant login_user belongs to
|
||||
required: false
|
||||
default: None
|
||||
version_added: "1.3"
|
||||
token:
|
||||
description:
|
||||
- The token to be uses in case the password is not specified
|
||||
|
|
|
@ -107,6 +107,12 @@ options:
|
|||
- The amount of time the module should wait for the VM to get into active state
|
||||
required: false
|
||||
default: 180
|
||||
user_data:
|
||||
description:
|
||||
- Opaque blob of data which is made available to the instance
|
||||
required: false
|
||||
default: None
|
||||
version_added: "1.6"
|
||||
requirements: ["novaclient"]
|
||||
'''
|
||||
|
||||
|
@ -157,6 +163,8 @@ def _create_server(module, nova):
|
|||
'meta' : module.params['meta'],
|
||||
'key_name': module.params['key_name'],
|
||||
'security_groups': module.params['security_groups'].split(','),
|
||||
#userdata is unhyphenated in novaclient, but hyphenated here for consistency with the ec2 module:
|
||||
'userdata': module.params['user_data'],
|
||||
}
|
||||
if not module.params['key_name']:
|
||||
del bootkwargs['key_name']
|
||||
|
@ -193,7 +201,12 @@ def _get_server_state(module, nova):
|
|||
try:
|
||||
servers = nova.servers.list(True, {'name': module.params['name']})
|
||||
if servers:
|
||||
server = [x for x in servers if x.name == module.params['name']][0]
|
||||
# the {'name': module.params['name']} will also return servers
|
||||
# with names that partially match the server name, so we have to
|
||||
# strictly filter here
|
||||
servers = [x for x in servers if x.name == module.params['name']]
|
||||
if servers:
|
||||
server = servers[0]
|
||||
except Exception, e:
|
||||
module.fail_json(msg = "Error in getting the server list: %s" % e.message)
|
||||
if server and module.params['state'] == 'present':
|
||||
|
@ -227,7 +240,8 @@ def main():
|
|||
meta = dict(default=None),
|
||||
wait = dict(default='yes', choices=['yes', 'no']),
|
||||
wait_for = dict(default=180),
|
||||
state = dict(default='present', choices=['absent', 'present'])
|
||||
state = dict(default='present', choices=['absent', 'present']),
|
||||
user_data = dict(default=None)
|
||||
),
|
||||
)
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
# along with this software. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
try:
|
||||
from novaclient.v1_1 import client
|
||||
from novaclient.v1_1 import client as nova_client
|
||||
from novaclient import exceptions
|
||||
import time
|
||||
except ImportError:
|
||||
|
|
|
@ -80,6 +80,7 @@ options:
|
|||
- The name of the network of the port to associate with the floating ip. Necessary when VM multiple networks.
|
||||
required: false
|
||||
default: None
|
||||
version_added: "1.5"
|
||||
requirements: ["novaclient", "quantumclient", "neutronclient", "keystoneclient"]
|
||||
'''
|
||||
|
||||
|
|
|
@ -98,6 +98,7 @@ options:
|
|||
- DNS nameservers for this subnet, comma-separated
|
||||
required: false
|
||||
default: None
|
||||
version_added: "1.4"
|
||||
allocation_pool_start:
|
||||
description:
|
||||
- From the subnet pool the starting address from which the IP should be allocated
|
||||
|
@ -259,7 +260,7 @@ def main():
|
|||
tenant_name = dict(default=None),
|
||||
state = dict(default='present', choices=['absent', 'present']),
|
||||
ip_version = dict(default='4', choices=['4', '6']),
|
||||
enable_dhcp = dict(default='true', choices=BOOLEANS),
|
||||
enable_dhcp = dict(default='true', type='bool'),
|
||||
gateway_ip = dict(default=None),
|
||||
dns_nameservers = dict(default=None),
|
||||
allocation_pool_start = dict(default=None),
|
||||
|
|
143
cloud/rax
143
cloud/rax
|
@ -1,4 +1,4 @@
|
|||
#!/usr/bin/python -tt
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
|
@ -14,6 +14,8 @@
|
|||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax
|
||||
|
@ -23,52 +25,6 @@ description:
|
|||
waits for it to be 'running'.
|
||||
version_added: "1.2"
|
||||
options:
|
||||
api_key:
|
||||
description:
|
||||
- Rackspace API key (overrides I(credentials))
|
||||
aliases:
|
||||
- password
|
||||
auth_endpoint:
|
||||
description:
|
||||
- The URI of the authentication service
|
||||
default: https://identity.api.rackspacecloud.com/v2.0/
|
||||
version_added: 1.5
|
||||
credentials:
|
||||
description:
|
||||
- File to find the Rackspace credentials in (ignored if I(api_key) and
|
||||
I(username) are provided)
|
||||
default: null
|
||||
aliases:
|
||||
- creds_file
|
||||
env:
|
||||
description:
|
||||
- Environment as configured in ~/.pyrax.cfg,
|
||||
see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration)
|
||||
version_added: 1.5
|
||||
identity_type:
|
||||
description:
|
||||
- Authentication machanism to use, such as rackspace or keystone
|
||||
default: rackspace
|
||||
version_added: 1.5
|
||||
region:
|
||||
description:
|
||||
- Region to create an instance in
|
||||
default: DFW
|
||||
tenant_id:
|
||||
description:
|
||||
- The tenant ID used for authentication
|
||||
version_added: 1.5
|
||||
tenant_name:
|
||||
description:
|
||||
- The tenant name used for authentication
|
||||
version_added: 1.5
|
||||
username:
|
||||
description:
|
||||
- Rackspace username (overrides I(credentials))
|
||||
verify_ssl:
|
||||
description:
|
||||
- Whether or not to require SSL validation of API endpoints
|
||||
version_added: 1.5
|
||||
auto_increment:
|
||||
description:
|
||||
- Whether or not to increment a single number with the name of the
|
||||
|
@ -89,7 +45,9 @@ options:
|
|||
disk_config:
|
||||
description:
|
||||
- Disk partitioning strategy
|
||||
choices: ['auto', 'manual']
|
||||
choices:
|
||||
- auto
|
||||
- manual
|
||||
version_added: '1.4'
|
||||
default: auto
|
||||
exact_count:
|
||||
|
@ -98,6 +56,17 @@ options:
|
|||
state=active/present
|
||||
default: no
|
||||
version_added: 1.4
|
||||
extra_client_args:
|
||||
description:
|
||||
- A hash of key/value pairs to be used when creating the cloudservers
|
||||
client. This is considered an advanced option, use it wisely and
|
||||
with caution.
|
||||
version_added: 1.6
|
||||
extra_create_args:
|
||||
description:
|
||||
- A hash of key/value pairs to be used when creating a new server.
|
||||
This is considered an advanced option, use it wisely and with caution.
|
||||
version_added: 1.6
|
||||
files:
|
||||
description:
|
||||
- Files to insert into the instance. remotefilename:localcontent
|
||||
|
@ -124,7 +93,8 @@ options:
|
|||
description:
|
||||
- key pair to use on the instance
|
||||
default: null
|
||||
aliases: ['keypair']
|
||||
aliases:
|
||||
- keypair
|
||||
meta:
|
||||
description:
|
||||
- A hash of metadata to associate with the instance
|
||||
|
@ -138,31 +108,30 @@ options:
|
|||
- The network to attach to the instances. If specified, you must include
|
||||
ALL networks including the public and private interfaces. Can be C(id)
|
||||
or C(label).
|
||||
default: ['public', 'private']
|
||||
default:
|
||||
- public
|
||||
- private
|
||||
version_added: 1.4
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the resource
|
||||
choices: ['present', 'absent']
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
default: present
|
||||
wait:
|
||||
description:
|
||||
- wait for the instance to be in state 'running' before returning
|
||||
default: "no"
|
||||
choices: [ "yes", "no" ]
|
||||
choices:
|
||||
- "yes"
|
||||
- "no"
|
||||
wait_timeout:
|
||||
description:
|
||||
- how long before wait gives up, in seconds
|
||||
default: 300
|
||||
requirements: [ "pyrax" ]
|
||||
author: Jesse Keating, Matt Martz
|
||||
notes:
|
||||
- The following environment variables can be used, C(RAX_USERNAME),
|
||||
C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
|
||||
- C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
|
||||
appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
|
||||
- C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
|
||||
- C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
|
||||
extends_documentation_fragment: rackspace.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -206,18 +175,18 @@ EXAMPLES = '''
|
|||
register: rax
|
||||
'''
|
||||
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
|
||||
from uuid import UUID
|
||||
from types import NoneType
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
print("failed=True msg='pyrax is required for this module'")
|
||||
sys.exit(1)
|
||||
HAS_PYRAX = False
|
||||
|
||||
ACTIVE_STATUSES = ('ACTIVE', 'BUILD', 'HARD_REBOOT', 'MIGRATING', 'PASSWORD',
|
||||
'REBOOT', 'REBUILD', 'RESCUE', 'RESIZE', 'REVERT_RESIZE')
|
||||
|
@ -246,7 +215,8 @@ def pyrax_object_to_dict(obj):
|
|||
|
||||
|
||||
def create(module, names, flavor, image, meta, key_name, files,
|
||||
wait, wait_timeout, disk_config, group, nics):
|
||||
wait, wait_timeout, disk_config, group, nics,
|
||||
extra_create_args):
|
||||
|
||||
cs = pyrax.cloudservers
|
||||
changed = False
|
||||
|
@ -266,7 +236,8 @@ def create(module, names, flavor, image, meta, key_name, files,
|
|||
flavor=flavor, meta=meta,
|
||||
key_name=key_name,
|
||||
files=files, nics=nics,
|
||||
disk_config=disk_config))
|
||||
disk_config=disk_config,
|
||||
**extra_create_args))
|
||||
except Exception, e:
|
||||
module.fail_json(msg='%s' % e.message)
|
||||
else:
|
||||
|
@ -405,11 +376,19 @@ def delete(module, instance_ids, wait, wait_timeout):
|
|||
def cloudservers(module, state, name, flavor, image, meta, key_name, files,
|
||||
wait, wait_timeout, disk_config, count, group,
|
||||
instance_ids, exact_count, networks, count_offset,
|
||||
auto_increment):
|
||||
auto_increment, extra_create_args):
|
||||
cs = pyrax.cloudservers
|
||||
cnw = pyrax.cloud_networks
|
||||
if not cnw:
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
servers = []
|
||||
|
||||
for key, value in meta.items():
|
||||
meta[key] = repr(value)
|
||||
|
||||
# Add the group meta key
|
||||
if group and 'group' not in meta:
|
||||
meta['group'] = group
|
||||
|
@ -602,7 +581,7 @@ def cloudservers(module, state, name, flavor, image, meta, key_name, files,
|
|||
names = [name] * (count - len(servers))
|
||||
|
||||
create(module, names, flavor, image, meta, key_name, files,
|
||||
wait, wait_timeout, disk_config, group, nics)
|
||||
wait, wait_timeout, disk_config, group, nics, extra_create_args)
|
||||
|
||||
elif state == 'absent':
|
||||
if instance_ids is None:
|
||||
|
@ -642,11 +621,13 @@ def main():
|
|||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
auto_increment=dict(choices=BOOLEANS, default=True, type='bool'),
|
||||
auto_increment=dict(default=True, type='bool'),
|
||||
count=dict(default=1, type='int'),
|
||||
count_offset=dict(default=1, type='int'),
|
||||
disk_config=dict(choices=['auto', 'manual']),
|
||||
exact_count=dict(choices=BOOLEANS, default=False, type='bool'),
|
||||
exact_count=dict(default=False, type='bool'),
|
||||
extra_client_args=dict(type='dict', default={}),
|
||||
extra_create_args=dict(type='dict', default={}),
|
||||
files=dict(type='dict', default={}),
|
||||
flavor=dict(),
|
||||
group=dict(),
|
||||
|
@ -658,7 +639,7 @@ def main():
|
|||
networks=dict(type='list', default=['public', 'private']),
|
||||
service=dict(),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
wait=dict(choices=BOOLEANS, default=False, type='bool'),
|
||||
wait=dict(default=False, type='bool'),
|
||||
wait_timeout=dict(default=300),
|
||||
)
|
||||
)
|
||||
|
@ -668,6 +649,9 @@ def main():
|
|||
required_together=rax_required_together(),
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
service = module.params.get('service')
|
||||
|
||||
if service is not None:
|
||||
|
@ -682,6 +666,8 @@ def main():
|
|||
if disk_config:
|
||||
disk_config = disk_config.upper()
|
||||
exact_count = module.params.get('exact_count', False)
|
||||
extra_client_args = module.params.get('extra_client_args')
|
||||
extra_create_args = module.params.get('extra_create_args')
|
||||
files = module.params.get('files')
|
||||
flavor = module.params.get('flavor')
|
||||
group = module.params.get('group')
|
||||
|
@ -697,10 +683,23 @@ def main():
|
|||
|
||||
setup_rax_module(module, pyrax)
|
||||
|
||||
if extra_client_args:
|
||||
pyrax.cloudservers = pyrax.connect_to_cloudservers(
|
||||
region=pyrax.cloudservers.client.region_name,
|
||||
**extra_client_args)
|
||||
client = pyrax.cloudservers.client
|
||||
if 'bypass_url' in extra_client_args:
|
||||
client.management_url = extra_client_args['bypass_url']
|
||||
|
||||
if pyrax.cloudservers is None:
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
cloudservers(module, state, name, flavor, image, meta, key_name, files,
|
||||
wait, wait_timeout, disk_config, count, group,
|
||||
instance_ids, exact_count, networks, count_offset,
|
||||
auto_increment)
|
||||
auto_increment, extra_create_args)
|
||||
|
||||
|
||||
# import module snippets
|
||||
|
|
236
cloud/rax_cbs
Normal file
236
cloud/rax_cbs
Normal file
|
@ -0,0 +1,236 @@
|
|||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_cbs
|
||||
short_description: Manipulate Rackspace Cloud Block Storage Volumes
|
||||
description:
|
||||
- Manipulate Rackspace Cloud Block Storage Volumes
|
||||
version_added: 1.6
|
||||
options:
|
||||
description:
|
||||
description:
|
||||
- Description to give the volume being created
|
||||
default: null
|
||||
meta:
|
||||
description:
|
||||
- A hash of metadata to associate with the volume
|
||||
default: null
|
||||
name:
|
||||
description:
|
||||
- Name to give the volume being created
|
||||
default: null
|
||||
required: true
|
||||
size:
|
||||
description:
|
||||
- Size of the volume to create in Gigabytes
|
||||
default: 100
|
||||
required: true
|
||||
snapshot_id:
|
||||
description:
|
||||
- The id of the snapshot to create the volume from
|
||||
default: null
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the resource
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
default: present
|
||||
required: true
|
||||
volume_type:
|
||||
description:
|
||||
- Type of the volume being created
|
||||
choices:
|
||||
- SATA
|
||||
- SSD
|
||||
default: SATA
|
||||
required: true
|
||||
wait:
|
||||
description:
|
||||
- wait for the volume to be in state 'available' before returning
|
||||
default: "no"
|
||||
choices:
|
||||
- "yes"
|
||||
- "no"
|
||||
wait_timeout:
|
||||
description:
|
||||
- how long before wait gives up, in seconds
|
||||
default: 300
|
||||
author: Christopher H. Laco, Matt Martz
|
||||
extends_documentation_fragment: rackspace.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Build a Block Storage Volume
|
||||
gather_facts: False
|
||||
hosts: local
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Storage volume create request
|
||||
local_action:
|
||||
module: rax_cbs
|
||||
credentials: ~/.raxpub
|
||||
name: my-volume
|
||||
description: My Volume
|
||||
volume_type: SSD
|
||||
size: 150
|
||||
region: DFW
|
||||
wait: yes
|
||||
state: present
|
||||
meta:
|
||||
app: my-cool-app
|
||||
register: my_volume
|
||||
'''
|
||||
|
||||
import sys
|
||||
|
||||
from uuid import UUID
|
||||
from types import NoneType
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
HAS_PYRAX = False
|
||||
|
||||
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
|
||||
VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
|
||||
'error', 'error_deleting')
|
||||
|
||||
|
||||
def cloud_block_storage(module, state, name, description, meta, size,
|
||||
snapshot_id, volume_type, wait, wait_timeout):
|
||||
for arg in (state, name, size, volume_type):
|
||||
if not arg:
|
||||
module.fail_json(msg='%s is required for rax_cbs' % arg)
|
||||
|
||||
if size < 100:
|
||||
module.fail_json(msg='"size" must be greater than or equal to 100')
|
||||
|
||||
changed = False
|
||||
volume = None
|
||||
instance = {}
|
||||
|
||||
cbs = pyrax.cloud_blockstorage
|
||||
|
||||
if cbs is None:
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
try:
|
||||
UUID(name)
|
||||
volume = cbs.get(name)
|
||||
except ValueError:
|
||||
try:
|
||||
volume = cbs.find(name=name)
|
||||
except Exception, e:
|
||||
module.fail_json(msg='%s' % e)
|
||||
|
||||
if state == 'present':
|
||||
if not volume:
|
||||
try:
|
||||
volume = cbs.create(name, size=size, volume_type=volume_type,
|
||||
description=description,
|
||||
metadata=meta,
|
||||
snapshot_id=snapshot_id)
|
||||
changed = True
|
||||
except Exception, e:
|
||||
module.fail_json(msg='%s' % e.message)
|
||||
else:
|
||||
if wait:
|
||||
attempts = wait_timeout / 5
|
||||
pyrax.utils.wait_for_build(volume, interval=5,
|
||||
attempts=attempts)
|
||||
|
||||
volume.get()
|
||||
for key, value in vars(volume).iteritems():
|
||||
if (isinstance(value, NON_CALLABLES) and
|
||||
not key.startswith('_')):
|
||||
instance[key] = value
|
||||
|
||||
result = dict(changed=changed, volume=instance)
|
||||
|
||||
if volume.status == 'error':
|
||||
result['msg'] = '%s failed to build' % volume.id
|
||||
elif wait and volume.status not in VOLUME_STATUS:
|
||||
result['msg'] = 'Timeout waiting on %s' % volume.id
|
||||
|
||||
if 'msg' in result:
|
||||
module.fail_json(**result)
|
||||
else:
|
||||
module.exit_json(**result)
|
||||
|
||||
elif state == 'absent':
|
||||
if volume:
|
||||
try:
|
||||
volume.delete()
|
||||
changed = True
|
||||
except Exception, e:
|
||||
module.fail_json(msg='%s' % e.message)
|
||||
|
||||
module.exit_json(changed=changed, volume=instance)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
description=dict(),
|
||||
meta=dict(type='dict', default={}),
|
||||
name=dict(required=True),
|
||||
size=dict(type='int', default=100),
|
||||
snapshot_id=dict(),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
|
||||
wait=dict(type='bool', default=False),
|
||||
wait_timeout=dict(type='int', default=300)
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
required_together=rax_required_together()
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
description = module.params.get('description')
|
||||
meta = module.params.get('meta')
|
||||
name = module.params.get('name')
|
||||
size = module.params.get('size')
|
||||
snapshot_id = module.params.get('snapshot_id')
|
||||
state = module.params.get('state')
|
||||
volume_type = module.params.get('volume_type')
|
||||
wait = module.params.get('wait')
|
||||
wait_timeout = module.params.get('wait_timeout')
|
||||
|
||||
setup_rax_module(module, pyrax)
|
||||
|
||||
cloud_block_storage(module, state, name, description, meta, size,
|
||||
snapshot_id, volume_type, wait, wait_timeout)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.rax import *
|
||||
|
||||
### invoke the module
|
||||
main()
|
268
cloud/rax_cbs_attachments
Normal file
268
cloud/rax_cbs_attachments
Normal file
|
@ -0,0 +1,268 @@
|
|||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_cbs_attachments
|
||||
short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments
|
||||
description:
|
||||
- Manipulate Rackspace Cloud Block Storage Volume Attachments
|
||||
version_added: 1.6
|
||||
options:
|
||||
device:
|
||||
description:
|
||||
- The device path to attach the volume to, e.g. /dev/xvde
|
||||
default: null
|
||||
required: true
|
||||
volume:
|
||||
description:
|
||||
- Name or id of the volume to attach/detach
|
||||
default: null
|
||||
required: true
|
||||
server:
|
||||
description:
|
||||
- Name or id of the server to attach/detach
|
||||
default: null
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the resource
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
default: present
|
||||
required: true
|
||||
wait:
|
||||
description:
|
||||
- wait for the volume to be in 'in-use'/'available' state before returning
|
||||
default: "no"
|
||||
choices:
|
||||
- "yes"
|
||||
- "no"
|
||||
wait_timeout:
|
||||
description:
|
||||
- how long before wait gives up, in seconds
|
||||
default: 300
|
||||
author: Christopher H. Laco, Matt Martz
|
||||
extends_documentation_fragment: rackspace.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Attach a Block Storage Volume
|
||||
gather_facts: False
|
||||
hosts: local
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Storage volume attach request
|
||||
local_action:
|
||||
module: rax_cbs_attachments
|
||||
credentials: ~/.raxpub
|
||||
volume: my-volume
|
||||
server: my-server
|
||||
device: /dev/xvdd
|
||||
region: DFW
|
||||
wait: yes
|
||||
state: present
|
||||
register: my_volume
|
||||
'''
|
||||
|
||||
import sys
|
||||
|
||||
from uuid import UUID
|
||||
from types import NoneType
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
HAS_PYRAX = False
|
||||
|
||||
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
|
||||
|
||||
|
||||
def cloud_block_storage_attachments(module, state, volume, server, device,
|
||||
wait, wait_timeout):
|
||||
for arg in (state, volume, server, device):
|
||||
if not arg:
|
||||
module.fail_json(msg='%s is required for rax_cbs_attachments' %
|
||||
arg)
|
||||
|
||||
cbs = pyrax.cloud_blockstorage
|
||||
cs = pyrax.cloudservers
|
||||
|
||||
if cbs is None or cs is None:
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
changed = False
|
||||
instance = {}
|
||||
|
||||
try:
|
||||
UUID(volume)
|
||||
volume = cbs.get(volume)
|
||||
except ValueError:
|
||||
try:
|
||||
volume = cbs.find(name=volume)
|
||||
except Exception, e:
|
||||
module.fail_json(msg='%s' % e)
|
||||
|
||||
if not volume:
|
||||
module.fail_json(msg='No matching storage volumes were found')
|
||||
|
||||
if state == 'present':
|
||||
try:
|
||||
UUID(server)
|
||||
server = cs.servers.get(server)
|
||||
except ValueError:
|
||||
servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
|
||||
if not servers:
|
||||
module.fail_json(msg='No Server was matched by name, '
|
||||
'try using the Server ID instead')
|
||||
if len(servers) > 1:
|
||||
module.fail_json(msg='Multiple servers matched by name, '
|
||||
'try using the Server ID instead')
|
||||
|
||||
# We made it this far, grab the first and hopefully only server
|
||||
# in the list
|
||||
server = servers[0]
|
||||
|
||||
if (volume.attachments and
|
||||
volume.attachments[0]['server_id'] == server.id):
|
||||
changed = False
|
||||
elif volume.attachments:
|
||||
module.fail_json(msg='Volume is attached to another server')
|
||||
else:
|
||||
try:
|
||||
volume.attach_to_instance(server, mountpoint=device)
|
||||
changed = True
|
||||
except Exception, e:
|
||||
module.fail_json(msg='%s' % e.message)
|
||||
|
||||
volume.get()
|
||||
|
||||
for key, value in vars(volume).iteritems():
|
||||
if (isinstance(value, NON_CALLABLES) and
|
||||
not key.startswith('_')):
|
||||
instance[key] = value
|
||||
|
||||
result = dict(changed=changed, volume=instance)
|
||||
|
||||
if volume.status == 'error':
|
||||
result['msg'] = '%s failed to build' % volume.id
|
||||
elif wait:
|
||||
attempts = wait_timeout / 5
|
||||
pyrax.utils.wait_until(volume, 'status', 'in-use',
|
||||
interval=5, attempts=attempts)
|
||||
|
||||
if 'msg' in result:
|
||||
module.fail_json(**result)
|
||||
else:
|
||||
module.exit_json(**result)
|
||||
|
||||
elif state == 'absent':
|
||||
try:
|
||||
UUID(server)
|
||||
server = cs.servers.get(server)
|
||||
except ValueError:
|
||||
servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
|
||||
if not servers:
|
||||
module.fail_json(msg='No Server was matched by name, '
|
||||
'try using the Server ID instead')
|
||||
if len(servers) > 1:
|
||||
module.fail_json(msg='Multiple servers matched by name, '
|
||||
'try using the Server ID instead')
|
||||
|
||||
# We made it this far, grab the first and hopefully only server
|
||||
# in the list
|
||||
server = servers[0]
|
||||
|
||||
if (volume.attachments and
|
||||
volume.attachments[0]['server_id'] == server.id):
|
||||
try:
|
||||
volume.detach()
|
||||
if wait:
|
||||
pyrax.utils.wait_until(volume, 'status', 'available',
|
||||
interval=3, attempts=0,
|
||||
verbose=False)
|
||||
changed = True
|
||||
except Exception, e:
|
||||
module.fail_json(msg='%s' % e.message)
|
||||
|
||||
volume.get()
|
||||
changed = True
|
||||
elif volume.attachments:
|
||||
module.fail_json(msg='Volume is attached to another server')
|
||||
|
||||
for key, value in vars(volume).iteritems():
|
||||
if (isinstance(value, NON_CALLABLES) and
|
||||
not key.startswith('_')):
|
||||
instance[key] = value
|
||||
|
||||
result = dict(changed=changed, volume=instance)
|
||||
|
||||
if volume.status == 'error':
|
||||
result['msg'] = '%s failed to build' % volume.id
|
||||
|
||||
if 'msg' in result:
|
||||
module.fail_json(**result)
|
||||
else:
|
||||
module.exit_json(**result)
|
||||
|
||||
module.exit_json(changed=changed, volume=instance)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
device=dict(required=True),
|
||||
volume=dict(required=True),
|
||||
server=dict(required=True),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
wait=dict(type='bool', default=False),
|
||||
wait_timeout=dict(type='int', default=300)
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
required_together=rax_required_together()
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
device = module.params.get('device')
|
||||
volume = module.params.get('volume')
|
||||
server = module.params.get('server')
|
||||
state = module.params.get('state')
|
||||
wait = module.params.get('wait')
|
||||
wait_timeout = module.params.get('wait_timeout')
|
||||
|
||||
setup_rax_module(module, pyrax)
|
||||
|
||||
cloud_block_storage_attachments(module, state, volume, server, device,
|
||||
wait, wait_timeout)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.rax import *
|
||||
|
||||
### invoke the module
|
||||
main()
|
|
@ -1,4 +1,4 @@
|
|||
#!/usr/bin/python -tt
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
|
@ -14,6 +14,8 @@
|
|||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_clb
|
||||
|
@ -25,17 +27,13 @@ options:
|
|||
algorithm:
|
||||
description:
|
||||
- algorithm for the balancer being created
|
||||
choices: ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN', 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
|
||||
choices:
|
||||
- RANDOM
|
||||
- LEAST_CONNECTIONS
|
||||
- ROUND_ROBIN
|
||||
- WEIGHTED_LEAST_CONNECTIONS
|
||||
- WEIGHTED_ROUND_ROBIN
|
||||
default: LEAST_CONNECTIONS
|
||||
api_key:
|
||||
description:
|
||||
- Rackspace API key (overrides C(credentials))
|
||||
credentials:
|
||||
description:
|
||||
- File to find the Rackspace credentials in (ignored if C(api_key) and
|
||||
C(username) are provided)
|
||||
default: null
|
||||
aliases: ['creds_file']
|
||||
meta:
|
||||
description:
|
||||
- A hash of metadata to associate with the instance
|
||||
|
@ -51,16 +49,32 @@ options:
|
|||
protocol:
|
||||
description:
|
||||
- Protocol for the balancer being created
|
||||
choices: ['DNS_TCP', 'DNS_UDP' ,'FTP', 'HTTP', 'HTTPS', 'IMAPS', 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP', 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
|
||||
choices:
|
||||
- DNS_TCP
|
||||
- DNS_UDP
|
||||
- FTP
|
||||
- HTTP
|
||||
- HTTPS
|
||||
- IMAPS
|
||||
- IMAPv4
|
||||
- LDAP
|
||||
- LDAPS
|
||||
- MYSQL
|
||||
- POP3
|
||||
- POP3S
|
||||
- SMTP
|
||||
- TCP
|
||||
- TCP_CLIENT_FIRST
|
||||
- UDP
|
||||
- UDP_STREAM
|
||||
- SFTP
|
||||
default: HTTP
|
||||
region:
|
||||
description:
|
||||
- Region to create the load balancer in
|
||||
default: DFW
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the resource
|
||||
choices: ['present', 'absent']
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
default: present
|
||||
timeout:
|
||||
description:
|
||||
|
@ -69,11 +83,10 @@ options:
|
|||
type:
|
||||
description:
|
||||
- type of interface for the balancer being created
|
||||
choices: ['PUBLIC', 'SERVICENET']
|
||||
choices:
|
||||
- PUBLIC
|
||||
- SERVICENET
|
||||
default: PUBLIC
|
||||
username:
|
||||
description:
|
||||
- Rackspace username (overrides C(credentials))
|
||||
vip_id:
|
||||
description:
|
||||
- Virtual IP ID to use when creating the load balancer for purposes of
|
||||
|
@ -83,20 +96,15 @@ options:
|
|||
description:
|
||||
- wait for the balancer to be in state 'running' before returning
|
||||
default: "no"
|
||||
choices: [ "yes", "no" ]
|
||||
choices:
|
||||
- "yes"
|
||||
- "no"
|
||||
wait_timeout:
|
||||
description:
|
||||
- how long before wait gives up, in seconds
|
||||
default: 300
|
||||
requirements: [ "pyrax" ]
|
||||
author: Christopher H. Laco, Matt Martz
|
||||
notes:
|
||||
- The following environment variables can be used, C(RAX_USERNAME),
|
||||
C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
|
||||
- C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
|
||||
appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
|
||||
- C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
|
||||
- C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
|
||||
extends_documentation_fragment: rackspace
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -122,15 +130,13 @@ EXAMPLES = '''
|
|||
register: my_lb
|
||||
'''
|
||||
|
||||
import sys
|
||||
|
||||
from types import NoneType
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
print("failed=True msg='pyrax required for this module'")
|
||||
sys.exit(1)
|
||||
HAS_PYRAX = False
|
||||
|
||||
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
|
||||
ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
|
||||
|
@ -182,6 +188,10 @@ def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
|
|||
balancers = []
|
||||
|
||||
clb = pyrax.cloud_loadbalancers
|
||||
if not clb:
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
for balancer in clb.list():
|
||||
if name != balancer.name and name != balancer.id:
|
||||
|
@ -300,6 +310,9 @@ def main():
|
|||
required_together=rax_required_together(),
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
algorithm = module.params.get('algorithm')
|
||||
meta = module.params.get('meta')
|
||||
name = module.params.get('name')
|
||||
|
|
|
@ -14,6 +14,8 @@
|
|||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_clb_nodes
|
||||
|
@ -26,21 +28,15 @@ options:
|
|||
required: false
|
||||
description:
|
||||
- IP address or domain name of the node
|
||||
api_key:
|
||||
required: false
|
||||
description:
|
||||
- Rackspace API key (overrides C(credentials))
|
||||
condition:
|
||||
required: false
|
||||
choices: [ "enabled", "disabled", "draining" ]
|
||||
choices:
|
||||
- enabled
|
||||
- disabled
|
||||
- draining
|
||||
description:
|
||||
- Condition for the node, which determines its role within the load
|
||||
balancer
|
||||
credentials:
|
||||
required: false
|
||||
description:
|
||||
- File to find the Rackspace credentials in (ignored if C(api_key) and
|
||||
C(username) are provided)
|
||||
load_balancer_id:
|
||||
required: true
|
||||
type: integer
|
||||
|
@ -56,35 +52,27 @@ options:
|
|||
type: integer
|
||||
description:
|
||||
- Port number of the load balanced service on the node
|
||||
region:
|
||||
required: false
|
||||
description:
|
||||
- Region to authenticate in
|
||||
state:
|
||||
required: false
|
||||
default: "present"
|
||||
choices: [ "present", "absent" ]
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
description:
|
||||
- Indicate desired state of the node
|
||||
type:
|
||||
required: false
|
||||
choices: [ "primary", "secondary" ]
|
||||
choices:
|
||||
- primary
|
||||
- secondary
|
||||
description:
|
||||
- Type of node
|
||||
username:
|
||||
required: false
|
||||
description:
|
||||
- Rackspace username (overrides C(credentials))
|
||||
virtualenv:
|
||||
required: false
|
||||
description:
|
||||
- Path to a virtualenv that should be activated before doing anything.
|
||||
The virtualenv has to already exist. Useful if installing pyrax
|
||||
globally is not an option.
|
||||
wait:
|
||||
required: false
|
||||
default: "no"
|
||||
choices: [ "yes", "no" ]
|
||||
choices:
|
||||
- "yes"
|
||||
- "no"
|
||||
description:
|
||||
- Wait for the load balancer to become active before returning
|
||||
wait_timeout:
|
||||
|
@ -97,11 +85,8 @@ options:
|
|||
required: false
|
||||
description:
|
||||
- Weight of node
|
||||
requirements: [ "pyrax" ]
|
||||
author: Lukasz Kawczynski
|
||||
notes:
|
||||
- "The following environment variables can be used: C(RAX_USERNAME),
|
||||
C(RAX_API_KEY), C(RAX_CREDENTIALS) and C(RAX_REGION)."
|
||||
extends_documentation_fragment: rackspace
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -136,13 +121,12 @@ EXAMPLES = '''
|
|||
'''
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
print("failed=True msg='pyrax is required for this module'")
|
||||
sys.exit(1)
|
||||
HAS_PYRAX = False
|
||||
|
||||
|
||||
def _activate_virtualenv(path):
|
||||
|
@ -151,11 +135,20 @@ def _activate_virtualenv(path):
|
|||
execfile(activate_this, dict(__file__=activate_this))
|
||||
|
||||
|
||||
def _get_node(lb, node_id):
|
||||
"""Return a node with the given `node_id`"""
|
||||
for node in lb.nodes:
|
||||
if node.id == node_id:
|
||||
def _get_node(lb, node_id=None, address=None, port=None):
|
||||
"""Return a matching node"""
|
||||
for node in getattr(lb, 'nodes', []):
|
||||
match_list = []
|
||||
if node_id is not None:
|
||||
match_list.append(getattr(node, 'id', None) == node_id)
|
||||
if address is not None:
|
||||
match_list.append(getattr(node, 'address', None) == address)
|
||||
if port is not None:
|
||||
match_list.append(getattr(node, 'port', None) == port)
|
||||
|
||||
if match_list and all(match_list):
|
||||
return node
|
||||
|
||||
return None
|
||||
|
||||
|
||||
|
@ -211,6 +204,9 @@ def main():
|
|||
required_together=rax_required_together(),
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
address = module.params['address']
|
||||
condition = (module.params['condition'] and
|
||||
module.params['condition'].upper())
|
||||
|
@ -234,18 +230,16 @@ def main():
|
|||
setup_rax_module(module, pyrax)
|
||||
|
||||
if not pyrax.cloud_loadbalancers:
|
||||
module.fail_json(msg='Failed to instantiate load balancer client '
|
||||
'(possibly incorrect region)')
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
try:
|
||||
lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
|
||||
except pyrax.exc.PyraxException, e:
|
||||
module.fail_json(msg='%s' % e.message)
|
||||
|
||||
if node_id:
|
||||
node = _get_node(lb, node_id)
|
||||
else:
|
||||
node = None
|
||||
node = _get_node(lb, node_id, address, port)
|
||||
|
||||
result = _node_to_dict(node)
|
||||
|
||||
|
@ -284,22 +278,12 @@ def main():
|
|||
except pyrax.exc.PyraxException, e:
|
||||
module.fail_json(msg='%s' % e.message)
|
||||
else: # Updating an existing node
|
||||
immutable = {
|
||||
'address': address,
|
||||
'port': port,
|
||||
}
|
||||
|
||||
mutable = {
|
||||
'condition': condition,
|
||||
'type': typ,
|
||||
'weight': weight,
|
||||
}
|
||||
|
||||
for name, value in immutable.items():
|
||||
if value:
|
||||
module.fail_json(
|
||||
msg='Attribute %s cannot be modified' % name)
|
||||
|
||||
for name, value in mutable.items():
|
||||
if value is None or value == getattr(node, name):
|
||||
mutable.pop(name)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#!/usr/bin/python -tt
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
|
@ -14,6 +14,8 @@
|
|||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_dns
|
||||
|
@ -22,18 +24,9 @@ description:
|
|||
- Manage domains on Rackspace Cloud DNS
|
||||
version_added: 1.5
|
||||
options:
|
||||
api_key:
|
||||
description:
|
||||
- Rackspace API key (overrides C(credentials))
|
||||
comment:
|
||||
description:
|
||||
- Brief description of the domain. Maximum length of 160 characters
|
||||
credentials:
|
||||
description:
|
||||
- File to find the Rackspace credentials in (ignored if C(api_key) and
|
||||
C(username) are provided)
|
||||
default: null
|
||||
aliases: ['creds_file']
|
||||
email:
|
||||
desctiption:
|
||||
- Email address of the domain administrator
|
||||
|
@ -43,24 +36,16 @@ options:
|
|||
state:
|
||||
description:
|
||||
- Indicate desired state of the resource
|
||||
choices: ['present', 'absent']
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
default: present
|
||||
ttl:
|
||||
description:
|
||||
- Time to live of domain in seconds
|
||||
default: 3600
|
||||
username:
|
||||
description:
|
||||
- Rackspace username (overrides C(credentials))
|
||||
requirements: [ "pyrax" ]
|
||||
author: Matt Martz
|
||||
notes:
|
||||
- The following environment variables can be used, C(RAX_USERNAME),
|
||||
C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
|
||||
- C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
|
||||
appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
|
||||
- C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
|
||||
- C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
|
||||
extends_documentation_fragment: rackspace
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -77,16 +62,13 @@ EXAMPLES = '''
|
|||
register: rax_dns
|
||||
'''
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
from types import NoneType
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
print("failed=True msg='pyrax required for this module'")
|
||||
sys.exit(1)
|
||||
HAS_PYRAX = False
|
||||
|
||||
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
|
||||
|
||||
|
@ -104,6 +86,10 @@ def rax_dns(module, comment, email, name, state, ttl):
|
|||
changed = False
|
||||
|
||||
dns = pyrax.cloud_dns
|
||||
if not dns:
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
if state == 'present':
|
||||
if not email:
|
||||
|
@ -174,6 +160,9 @@ def main():
|
|||
required_together=rax_required_together(),
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
comment = module.params.get('comment')
|
||||
email = module.params.get('email')
|
||||
name = module.params.get('name')
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#!/usr/bin/python -tt
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
|
@ -14,6 +14,8 @@
|
|||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_dns_record
|
||||
|
@ -22,18 +24,9 @@ description:
|
|||
- Manage DNS records on Rackspace Cloud DNS
|
||||
version_added: 1.5
|
||||
options:
|
||||
api_key:
|
||||
description:
|
||||
- Rackspace API key (overrides C(credentials))
|
||||
comment:
|
||||
description:
|
||||
- Brief description of the domain. Maximum length of 160 characters
|
||||
credentials:
|
||||
description:
|
||||
- File to find the Rackspace credentials in (ignored if C(api_key) and
|
||||
C(username) are provided)
|
||||
default: null
|
||||
aliases: ['creds_file']
|
||||
data:
|
||||
description:
|
||||
- IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
|
||||
|
@ -54,7 +47,9 @@ options:
|
|||
state:
|
||||
description:
|
||||
- Indicate desired state of the resource
|
||||
choices: ['present', 'absent']
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
default: present
|
||||
ttl:
|
||||
description:
|
||||
|
@ -63,20 +58,17 @@ options:
|
|||
type:
|
||||
description:
|
||||
- DNS record type
|
||||
choices: ['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT']
|
||||
choices:
|
||||
- A
|
||||
- AAAA
|
||||
- CNAME
|
||||
- MX
|
||||
- NS
|
||||
- SRV
|
||||
- TXT
|
||||
default: A
|
||||
username:
|
||||
description:
|
||||
- Rackspace username (overrides C(credentials))
|
||||
requirements: [ "pyrax" ]
|
||||
author: Matt Martz
|
||||
notes:
|
||||
- The following environment variables can be used, C(RAX_USERNAME),
|
||||
C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
|
||||
- C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
|
||||
appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
|
||||
- C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
|
||||
- C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
|
||||
extends_documentation_fragment: rackspace
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -95,16 +87,13 @@ EXAMPLES = '''
|
|||
register: rax_dns_record
|
||||
'''
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
from types import NoneType
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
print("failed=True msg='pyrax required for this module'")
|
||||
sys.exit(1)
|
||||
HAS_PYRAX = False
|
||||
|
||||
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
|
||||
|
||||
|
@ -123,6 +112,10 @@ def rax_dns_record(module, comment, data, domain, name, priority, record_type,
|
|||
changed = False
|
||||
|
||||
dns = pyrax.cloud_dns
|
||||
if not dns:
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
if state == 'present':
|
||||
if not priority and record_type in ['MX', 'SRV']:
|
||||
|
@ -219,6 +212,9 @@ def main():
|
|||
required_together=rax_required_together(),
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
comment = module.params.get('comment')
|
||||
data = module.params.get('data')
|
||||
domain = module.params.get('domain')
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#!/usr/bin/python -tt
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
|
@ -14,6 +14,8 @@
|
|||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_facts
|
||||
|
@ -22,52 +24,6 @@ description:
|
|||
- Gather facts for Rackspace Cloud Servers.
|
||||
version_added: "1.4"
|
||||
options:
|
||||
api_key:
|
||||
description:
|
||||
- Rackspace API key (overrides I(credentials))
|
||||
aliases:
|
||||
- password
|
||||
auth_endpoint:
|
||||
description:
|
||||
- The URI of the authentication service
|
||||
default: https://identity.api.rackspacecloud.com/v2.0/
|
||||
version_added: 1.5
|
||||
credentials:
|
||||
description:
|
||||
- File to find the Rackspace credentials in (ignored if I(api_key) and
|
||||
I(username) are provided)
|
||||
default: null
|
||||
aliases:
|
||||
- creds_file
|
||||
env:
|
||||
description:
|
||||
- Environment as configured in ~/.pyrax.cfg,
|
||||
see https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration
|
||||
version_added: 1.5
|
||||
identity_type:
|
||||
description:
|
||||
- Authentication machanism to use, such as rackspace or keystone
|
||||
default: rackspace
|
||||
version_added: 1.5
|
||||
region:
|
||||
description:
|
||||
- Region to create an instance in
|
||||
default: DFW
|
||||
tenant_id:
|
||||
description:
|
||||
- The tenant ID used for authentication
|
||||
version_added: 1.5
|
||||
tenant_name:
|
||||
description:
|
||||
- The tenant name used for authentication
|
||||
version_added: 1.5
|
||||
username:
|
||||
description:
|
||||
- Rackspace username (overrides I(credentials))
|
||||
verify_ssl:
|
||||
description:
|
||||
- Whether or not to require SSL validation of API endpoints
|
||||
version_added: 1.5
|
||||
address:
|
||||
description:
|
||||
- Server IP address to retrieve facts for, will match any IP assigned to
|
||||
|
@ -79,15 +35,8 @@ options:
|
|||
description:
|
||||
- Server name to retrieve facts for
|
||||
default: null
|
||||
requirements: [ "pyrax" ]
|
||||
author: Matt Martz
|
||||
notes:
|
||||
- The following environment variables can be used, C(RAX_USERNAME),
|
||||
C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
|
||||
- C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
|
||||
appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
|
||||
- C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
|
||||
- C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
|
||||
extends_documentation_fragment: rackspace.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -106,16 +55,13 @@ EXAMPLES = '''
|
|||
ansible_ssh_host: "{{ rax_accessipv4 }}"
|
||||
'''
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
from types import NoneType
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
print("failed=True msg='pyrax required for this module'")
|
||||
sys.exit(1)
|
||||
HAS_PYRAX = False
|
||||
|
||||
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
|
||||
|
||||
|
@ -138,6 +84,12 @@ def rax_facts(module, address, name, server_id):
|
|||
changed = False
|
||||
|
||||
cs = pyrax.cloudservers
|
||||
|
||||
if cs is None:
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
ansible_facts = {}
|
||||
|
||||
search_opts = {}
|
||||
|
@ -190,6 +142,9 @@ def main():
|
|||
required_one_of=[['address', 'id', 'name']],
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
address = module.params.get('address')
|
||||
server_id = module.params.get('id')
|
||||
name = module.params.get('name')
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#!/usr/bin/python -tt
|
||||
#!/usr/bin/python
|
||||
|
||||
# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
|
||||
#
|
||||
|
@ -17,6 +17,8 @@
|
|||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_files
|
||||
|
@ -25,25 +27,18 @@ description:
|
|||
- Manipulate Rackspace Cloud Files Containers
|
||||
version_added: "1.5"
|
||||
options:
|
||||
api_key:
|
||||
description:
|
||||
- Rackspace API key (overrides I(credentials))
|
||||
clear_meta:
|
||||
description:
|
||||
- Optionally clear existing metadata when applying metadata to existing containers.
|
||||
Selecting this option is only appropriate when setting type=meta
|
||||
choices: ["yes", "no"]
|
||||
choices:
|
||||
- "yes"
|
||||
- "no"
|
||||
default: "no"
|
||||
container:
|
||||
description:
|
||||
- The container to use for container or metadata operations.
|
||||
required: true
|
||||
credentials:
|
||||
description:
|
||||
- File to find the Rackspace credentials in (ignored if I(api_key) and
|
||||
I(username) are provided)
|
||||
default: null
|
||||
aliases: ['creds_file']
|
||||
meta:
|
||||
description:
|
||||
- A hash of items to set as metadata values on a container
|
||||
|
@ -59,6 +54,11 @@ options:
|
|||
description:
|
||||
- Region to create an instance in
|
||||
default: DFW
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the resource
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
ttl:
|
||||
description:
|
||||
- In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
|
||||
|
@ -66,26 +66,18 @@ options:
|
|||
type:
|
||||
description:
|
||||
- Type of object to do work on, i.e. metadata object or a container object
|
||||
choices: ["file", "meta"]
|
||||
default: "file"
|
||||
username:
|
||||
description:
|
||||
- Rackspace username (overrides I(credentials))
|
||||
choices:
|
||||
- file
|
||||
- meta
|
||||
default: file
|
||||
web_error:
|
||||
description:
|
||||
- Sets an object to be presented as the HTTP error page when accessed by the CDN URL
|
||||
web_index:
|
||||
description:
|
||||
- Sets an object to be presented as the HTTP index page when accessed by the CDN URL
|
||||
requirements: [ "pyrax" ]
|
||||
author: Paul Durivage
|
||||
notes:
|
||||
- The following environment variables can be used, C(RAX_USERNAME),
|
||||
C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
|
||||
- C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
|
||||
appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
|
||||
- C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
|
||||
- C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
|
||||
extends_documentation_fragment: rackspace
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -151,9 +143,9 @@ from ansible import __version__
|
|||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError, e:
|
||||
print("failed=True msg='pyrax is required for this module'")
|
||||
sys.exit(1)
|
||||
HAS_PYRAX = False
|
||||
|
||||
EXIT_DICT = dict(success=True)
|
||||
META_PREFIX = 'x-container-meta-'
|
||||
|
@ -208,7 +200,8 @@ def meta(cf, module, container_, state, meta_, clear_meta):
|
|||
module.exit_json(**EXIT_DICT)
|
||||
|
||||
|
||||
def container(cf, module, container_, state, meta_, clear_meta, ttl, public, private, web_index, web_error):
|
||||
def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
|
||||
private, web_index, web_error):
|
||||
if public and private:
|
||||
module.fail_json(msg='container cannot be simultaneously '
|
||||
'set to public and private')
|
||||
|
@ -232,6 +225,7 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, pri
|
|||
except Exception, e:
|
||||
module.fail_json(msg=e.message)
|
||||
else:
|
||||
EXIT_DICT['changed'] = True
|
||||
EXIT_DICT['created'] = True
|
||||
else:
|
||||
module.fail_json(msg=e.message)
|
||||
|
@ -304,11 +298,9 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, pri
|
|||
EXIT_DICT['container'] = c.name
|
||||
EXIT_DICT['objs_in_container'] = c.object_count
|
||||
EXIT_DICT['total_bytes'] = c.total_bytes
|
||||
|
||||
|
||||
_locals = locals().keys()
|
||||
|
||||
if ('cont_created' in _locals
|
||||
or 'cont_deleted' in _locals
|
||||
if ('cont_deleted' in _locals
|
||||
or 'meta_set' in _locals
|
||||
or 'cont_public' in _locals
|
||||
or 'cont_private' in _locals
|
||||
|
@ -319,15 +311,23 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, pri
|
|||
module.exit_json(**EXIT_DICT)
|
||||
|
||||
|
||||
def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, private, web_index, web_error):
|
||||
""" Dispatch from here to work with metadata or file objects """
|
||||
cf = pyrax.cloudfiles
|
||||
cf.user_agent = USER_AGENT
|
||||
def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
|
||||
private, web_index, web_error):
|
||||
""" Dispatch from here to work with metadata or file objects """
|
||||
cf = pyrax.cloudfiles
|
||||
|
||||
if typ == "container":
|
||||
container(cf, module, container_, state, meta_, clear_meta, ttl, public, private, web_index, web_error)
|
||||
else:
|
||||
meta(cf, module, container_, state, meta_, clear_meta)
|
||||
if cf is None:
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
cf.user_agent = USER_AGENT
|
||||
|
||||
if typ == "container":
|
||||
container(cf, module, container_, state, meta_, clear_meta, ttl,
|
||||
public, private, web_index, web_error)
|
||||
else:
|
||||
meta(cf, module, container_, state, meta_, clear_meta)
|
||||
|
||||
|
||||
def main():
|
||||
|
@ -335,13 +335,14 @@ def main():
|
|||
argument_spec.update(
|
||||
dict(
|
||||
container=dict(),
|
||||
state=dict(choices=['present', 'absent', 'list'], default='present'),
|
||||
state=dict(choices=['present', 'absent', 'list'],
|
||||
default='present'),
|
||||
meta=dict(type='dict', default=dict()),
|
||||
clear_meta=dict(choices=BOOLEANS, default=False, type='bool'),
|
||||
clear_meta=dict(default=False, type='bool'),
|
||||
type=dict(choices=['container', 'meta'], default='container'),
|
||||
ttl=dict(type='int'),
|
||||
public=dict(choices=BOOLEANS, default=False, type='bool'),
|
||||
private=dict(choices=BOOLEANS, default=False, type='bool'),
|
||||
public=dict(default=False, type='bool'),
|
||||
private=dict(default=False, type='bool'),
|
||||
web_index=dict(),
|
||||
web_error=dict()
|
||||
)
|
||||
|
@ -352,6 +353,9 @@ def main():
|
|||
required_together=rax_required_together()
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
container_ = module.params.get('container')
|
||||
state = module.params.get('state')
|
||||
meta_ = module.params.get('meta')
|
||||
|
@ -366,10 +370,12 @@ def main():
|
|||
if state in ['present', 'absent'] and not container_:
|
||||
module.fail_json(msg='please specify a container name')
|
||||
if clear_meta and not typ == 'meta':
|
||||
module.fail_json(msg='clear_meta can only be used when setting metadata')
|
||||
module.fail_json(msg='clear_meta can only be used when setting '
|
||||
'metadata')
|
||||
|
||||
setup_rax_module(module, pyrax)
|
||||
cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, private, web_index, web_error)
|
||||
cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
|
||||
private, web_index, web_error)
|
||||
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#!/usr/bin/python -tt
|
||||
#!/usr/bin/python
|
||||
|
||||
# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
|
||||
#
|
||||
|
@ -17,6 +17,8 @@
|
|||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_files_objects
|
||||
|
@ -25,26 +27,19 @@ description:
|
|||
- Upload, download, and delete objects in Rackspace Cloud Files
|
||||
version_added: "1.5"
|
||||
options:
|
||||
api_key:
|
||||
description:
|
||||
- Rackspace API key (overrides I(credentials))
|
||||
default: null
|
||||
clear_meta:
|
||||
description:
|
||||
- Optionally clear existing metadata when applying metadata to existing objects.
|
||||
Selecting this option is only appropriate when setting type=meta
|
||||
choices: ["yes", "no"]
|
||||
choices:
|
||||
- "yes"
|
||||
- "no"
|
||||
default: "no"
|
||||
container:
|
||||
description:
|
||||
- The container to use for file object operations.
|
||||
required: true
|
||||
default: null
|
||||
credentials:
|
||||
description:
|
||||
- File to find the Rackspace credentials in (ignored if I(api_key) and I(username) are provided)
|
||||
default: null
|
||||
aliases: ['creds_file']
|
||||
dest:
|
||||
description:
|
||||
- The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder".
|
||||
|
@ -64,12 +59,11 @@ options:
|
|||
- The method of operation to be performed. For example, put to upload files
|
||||
to Cloud Files, get to download files from Cloud Files or delete to delete
|
||||
remote objects in Cloud Files
|
||||
choices: ["get", "put", "delete"]
|
||||
default: "get"
|
||||
region:
|
||||
description:
|
||||
- Region in which to work. Maps to a Rackspace Cloud region, i.e. DFW, ORD, IAD, SYD, LON
|
||||
default: DFW
|
||||
choices:
|
||||
- get
|
||||
- put
|
||||
- delete
|
||||
default: get
|
||||
src:
|
||||
description:
|
||||
- Source from which to upload files. Used to specify a remote object as a source for
|
||||
|
@ -81,27 +75,25 @@ options:
|
|||
- Used to specify whether to maintain nested directory structure when downloading objects
|
||||
from Cloud Files. Setting to false downloads the contents of a container to a single,
|
||||
flat directory
|
||||
choices: ["yes", "no"]
|
||||
choices:
|
||||
- yes
|
||||
- "no"
|
||||
default: "yes"
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the resource
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
type:
|
||||
description:
|
||||
- Type of object to do work on
|
||||
- Metadata object or a file object
|
||||
choices: ["file", "meta"]
|
||||
default: "file"
|
||||
username:
|
||||
description:
|
||||
- Rackspace username (overrides I(credentials))
|
||||
default: null
|
||||
requirements: [ "pyrax" ]
|
||||
choices:
|
||||
- file
|
||||
- meta
|
||||
default: file
|
||||
author: Paul Durivage
|
||||
notes:
|
||||
- The following environment variables can be used, C(RAX_USERNAME), C(RAX_API_KEY),
|
||||
C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
|
||||
- C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file appropriate
|
||||
for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
|
||||
- C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
|
||||
- C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
|
||||
extends_documentation_fragment: rackspace
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -195,9 +187,9 @@ import os
|
|||
|
||||
try:
|
||||
import pyrax
|
||||
except ImportError, e:
|
||||
print("failed=True msg='pyrax is required for this module'")
|
||||
sys.exit(1)
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
HAS_PYRAX = False
|
||||
|
||||
EXIT_DICT = dict(success=False)
|
||||
META_PREFIX = 'x-object-meta-'
|
||||
|
@ -441,7 +433,6 @@ def get_meta(module, cf, container, src, dest):
|
|||
meta_key = k.split(META_PREFIX)[-1]
|
||||
results[obj][meta_key] = v
|
||||
|
||||
|
||||
EXIT_DICT['container'] = c.name
|
||||
if results:
|
||||
EXIT_DICT['meta_results'] = results
|
||||
|
@ -538,28 +529,33 @@ def delete_meta(module, cf, container, src, dest, meta):
|
|||
|
||||
def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
|
||||
structure, expires):
|
||||
""" Dispatch from here to work with metadata or file objects """
|
||||
cf = pyrax.cloudfiles
|
||||
""" Dispatch from here to work with metadata or file objects """
|
||||
cf = pyrax.cloudfiles
|
||||
|
||||
if typ == "file":
|
||||
if method == 'put':
|
||||
upload(module, cf, container, src, dest, meta, expires)
|
||||
if cf is None:
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
elif method == 'get':
|
||||
download(module, cf, container, src, dest, structure)
|
||||
if typ == "file":
|
||||
if method == 'put':
|
||||
upload(module, cf, container, src, dest, meta, expires)
|
||||
|
||||
elif method == 'delete':
|
||||
delete(module, cf, container, src, dest)
|
||||
elif method == 'get':
|
||||
download(module, cf, container, src, dest, structure)
|
||||
|
||||
else:
|
||||
if method == 'get':
|
||||
get_meta(module, cf, container, src, dest)
|
||||
elif method == 'delete':
|
||||
delete(module, cf, container, src, dest)
|
||||
|
||||
if method == 'put':
|
||||
put_meta(module, cf, container, src, dest, meta, clear_meta)
|
||||
else:
|
||||
if method == 'get':
|
||||
get_meta(module, cf, container, src, dest)
|
||||
|
||||
if method == 'delete':
|
||||
delete_meta(module, cf, container, src, dest, meta)
|
||||
if method == 'put':
|
||||
put_meta(module, cf, container, src, dest, meta, clear_meta)
|
||||
|
||||
if method == 'delete':
|
||||
delete_meta(module, cf, container, src, dest, meta)
|
||||
|
||||
|
||||
def main():
|
||||
|
@ -572,8 +568,8 @@ def main():
|
|||
method=dict(default='get', choices=['put', 'get', 'delete']),
|
||||
type=dict(default='file', choices=['file', 'meta']),
|
||||
meta=dict(type='dict', default=dict()),
|
||||
clear_meta=dict(choices=BOOLEANS, default=False, type='bool'),
|
||||
structure=dict(choices=BOOLEANS, default=True, type='bool'),
|
||||
clear_meta=dict(default=False, type='bool'),
|
||||
structure=dict(default=True, type='bool'),
|
||||
expires=dict(type='int'),
|
||||
)
|
||||
)
|
||||
|
@ -583,6 +579,9 @@ def main():
|
|||
required_together=rax_required_together()
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
container = module.params.get('container')
|
||||
src = module.params.get('src')
|
||||
dest = module.params.get('dest')
|
||||
|
@ -603,4 +602,4 @@ def main():
|
|||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.rax import *
|
||||
|
||||
main()
|
||||
main()
|
||||
|
|
117
cloud/rax_identity
Normal file
117
cloud/rax_identity
Normal file
|
@ -0,0 +1,117 @@
|
|||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_identity
|
||||
short_description: Load Rackspace Cloud Identity
|
||||
description:
|
||||
- Verifies Rackspace Cloud credentials and returns identity information
|
||||
version_added: "1.5"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the resource
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
author: Christopher H. Laco, Matt Martz
|
||||
extends_documentation_fragment: rackspace.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Load Rackspace Cloud Identity
|
||||
gather_facts: False
|
||||
hosts: local
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Load Identity
|
||||
local_action:
|
||||
module: rax_identity
|
||||
credentials: ~/.raxpub
|
||||
region: DFW
|
||||
register: rackspace_identity
|
||||
'''
|
||||
|
||||
from types import NoneType
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
HAS_PYRAX = False
|
||||
|
||||
|
||||
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
|
||||
|
||||
|
||||
def cloud_identity(module, state, identity):
|
||||
for arg in (state, identity):
|
||||
if not arg:
|
||||
module.fail_json(msg='%s is required for rax_identity' % arg)
|
||||
|
||||
instance = dict(
|
||||
authenticated=identity.authenticated,
|
||||
credentials=identity._creds_file
|
||||
)
|
||||
changed = False
|
||||
|
||||
for key, value in vars(identity).iteritems():
|
||||
if (isinstance(value, NON_CALLABLES) and
|
||||
not key.startswith('_')):
|
||||
instance[key] = value
|
||||
|
||||
if state == 'present':
|
||||
if not identity.authenticated:
|
||||
module.fail_json(msg='Credentials could not be verified!')
|
||||
|
||||
module.exit_json(changed=changed, identity=instance)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
state=dict(default='present', choices=['present', 'absent'])
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
required_together=rax_required_together()
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
state = module.params.get('state')
|
||||
|
||||
setup_rax_module(module, pyrax)
|
||||
|
||||
if pyrax.identity is None:
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
cloud_identity(module, state, pyrax.identity)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.rax import *
|
||||
|
||||
### invoke the module
|
||||
main()
|
|
@ -1,4 +1,4 @@
|
|||
#!/usr/bin/python -tt
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
|
@ -14,6 +14,8 @@
|
|||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_keypair
|
||||
|
@ -22,52 +24,6 @@ description:
|
|||
- Create a keypair for use with Rackspace Cloud Servers
|
||||
version_added: 1.5
|
||||
options:
|
||||
api_key:
|
||||
description:
|
||||
- Rackspace API key (overrides I(credentials))
|
||||
aliases:
|
||||
- password
|
||||
auth_endpoint:
|
||||
description:
|
||||
- The URI of the authentication service
|
||||
default: https://identity.api.rackspacecloud.com/v2.0/
|
||||
version_added: 1.5
|
||||
credentials:
|
||||
description:
|
||||
- File to find the Rackspace credentials in (ignored if I(api_key) and
|
||||
I(username) are provided)
|
||||
default: null
|
||||
aliases:
|
||||
- creds_file
|
||||
env:
|
||||
description:
|
||||
- Environment as configured in ~/.pyrax.cfg,
|
||||
see https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration
|
||||
version_added: 1.5
|
||||
identity_type:
|
||||
description:
|
||||
- Authentication machanism to use, such as rackspace or keystone
|
||||
default: rackspace
|
||||
version_added: 1.5
|
||||
region:
|
||||
description:
|
||||
- Region to create an instance in
|
||||
default: DFW
|
||||
tenant_id:
|
||||
description:
|
||||
- The tenant ID used for authentication
|
||||
version_added: 1.5
|
||||
tenant_name:
|
||||
description:
|
||||
- The tenant name used for authentication
|
||||
version_added: 1.5
|
||||
username:
|
||||
description:
|
||||
- Rackspace username (overrides I(credentials))
|
||||
verify_ssl:
|
||||
description:
|
||||
- Whether or not to require SSL validation of API endpoints
|
||||
version_added: 1.5
|
||||
name:
|
||||
description:
|
||||
- Name of keypair
|
||||
|
@ -79,24 +35,20 @@ options:
|
|||
state:
|
||||
description:
|
||||
- Indicate desired state of the resource
|
||||
choices: ['present', 'absent']
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
default: present
|
||||
requirements: [ "pyrax" ]
|
||||
author: Matt Martz
|
||||
notes:
|
||||
- The following environment variables can be used, C(RAX_USERNAME),
|
||||
C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
|
||||
- C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
|
||||
appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
|
||||
- C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
|
||||
- C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
|
||||
- Keypairs cannot be manipulated, only created and deleted. To "update" a
|
||||
keypair you must first delete and then recreate.
|
||||
extends_documentation_fragment: rackspace.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a keypair
|
||||
hosts: local
|
||||
hosts: localhost
|
||||
gather_facts: False
|
||||
tasks:
|
||||
- name: keypair request
|
||||
|
@ -116,17 +68,28 @@ EXAMPLES = '''
|
|||
module: copy
|
||||
content: "{{ keypair.keypair.private_key }}"
|
||||
dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
|
||||
'''
|
||||
|
||||
import sys
|
||||
- name: Create a keypair
|
||||
hosts: localhost
|
||||
gather_facts: False
|
||||
tasks:
|
||||
- name: keypair request
|
||||
local_action:
|
||||
module: rax_keypair
|
||||
credentials: ~/.raxpub
|
||||
name: my_keypair
|
||||
public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
|
||||
region: DFW
|
||||
register: keypair
|
||||
'''
|
||||
|
||||
from types import NoneType
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
print("failed=True msg='pyrax required for this module'")
|
||||
sys.exit(1)
|
||||
HAS_PYRAX = False
|
||||
|
||||
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
|
||||
|
||||
|
@ -144,6 +107,12 @@ def rax_keypair(module, name, public_key, state):
|
|||
changed = False
|
||||
|
||||
cs = pyrax.cloudservers
|
||||
|
||||
if cs is None:
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
keypair = {}
|
||||
|
||||
if state == 'present':
|
||||
|
@ -189,6 +158,9 @@ def main():
|
|||
required_together=rax_required_together(),
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
name = module.params.get('name')
|
||||
public_key = module.params.get('public_key')
|
||||
state = module.params.get('state')
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#!/usr/bin/python -tt
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
|
@ -14,6 +14,8 @@
|
|||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_network
|
||||
|
@ -25,20 +27,10 @@ options:
|
|||
state:
|
||||
description:
|
||||
- Indicate desired state of the resource
|
||||
choices: ['present', 'absent']
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
default: present
|
||||
credentials:
|
||||
description:
|
||||
- File to find the Rackspace credentials in (ignored if C(api_key) and
|
||||
C(username) are provided)
|
||||
default: null
|
||||
aliases: ['creds_file']
|
||||
api_key:
|
||||
description:
|
||||
- Rackspace API key (overrides C(credentials))
|
||||
username:
|
||||
description:
|
||||
- Rackspace username (overrides C(credentials))
|
||||
label:
|
||||
description:
|
||||
- Label (name) to give the network
|
||||
|
@ -47,19 +39,8 @@ options:
|
|||
description:
|
||||
- cidr of the network being created
|
||||
default: null
|
||||
region:
|
||||
description:
|
||||
- Region to create the network in
|
||||
default: DFW
|
||||
requirements: [ "pyrax" ]
|
||||
author: Christopher H. Laco, Jesse Keating
|
||||
notes:
|
||||
- The following environment variables can be used, C(RAX_USERNAME),
|
||||
C(RAX_API_KEY), C(RAX_CREDS), C(RAX_CREDENTIALS), C(RAX_REGION).
|
||||
- C(RAX_CREDENTIALS) and C(RAX_CREDS) points to a credentials file
|
||||
appropriate for pyrax
|
||||
- C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
|
||||
- C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
|
||||
extends_documentation_fragment: rackspace.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -76,16 +57,11 @@ EXAMPLES = '''
|
|||
state: present
|
||||
'''
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
import pyrax.utils
|
||||
from pyrax import exc
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
print("failed=True msg='pyrax required for this module'")
|
||||
sys.exit(1)
|
||||
HAS_PYRAX = False
|
||||
|
||||
|
||||
def cloud_network(module, state, label, cidr):
|
||||
|
@ -97,10 +73,15 @@ def cloud_network(module, state, label, cidr):
|
|||
network = None
|
||||
networks = []
|
||||
|
||||
if not pyrax.cloud_networks:
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
if state == 'present':
|
||||
try:
|
||||
network = pyrax.cloud_networks.find_network_by_label(label)
|
||||
except exc.NetworkNotFound:
|
||||
except pyrax.exceptions.NetworkNotFound:
|
||||
try:
|
||||
network = pyrax.cloud_networks.create(label, cidr=cidr)
|
||||
changed = True
|
||||
|
@ -114,7 +95,7 @@ def cloud_network(module, state, label, cidr):
|
|||
network = pyrax.cloud_networks.find_network_by_label(label)
|
||||
network.delete()
|
||||
changed = True
|
||||
except exc.NetworkNotFound:
|
||||
except pyrax.exceptions.NetworkNotFound:
|
||||
pass
|
||||
except Exception, e:
|
||||
module.fail_json(msg='%s' % e.message)
|
||||
|
@ -144,6 +125,9 @@ def main():
|
|||
required_together=rax_required_together(),
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
state = module.params.get('state')
|
||||
label = module.params.get('label')
|
||||
cidr = module.params.get('cidr')
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#!/usr/bin/python -tt
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
|
@ -14,6 +14,8 @@
|
|||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a DOCUMENTATION stub specific to this module, it extends
|
||||
# a documentation fragment located in ansible.utils.module_docs_fragments
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_queue
|
||||
|
@ -22,40 +24,19 @@ description:
|
|||
- creates / deletes a Rackspace Public Cloud queue.
|
||||
version_added: "1.5"
|
||||
options:
|
||||
api_key:
|
||||
description:
|
||||
- Rackspace API key (overrides C(credentials))
|
||||
credentials:
|
||||
description:
|
||||
- File to find the Rackspace credentials in (ignored if C(api_key) and
|
||||
C(username) are provided)
|
||||
default: null
|
||||
aliases: ['creds_file']
|
||||
name:
|
||||
description:
|
||||
- Name to give the queue
|
||||
default: null
|
||||
region:
|
||||
description:
|
||||
- Region to create the load balancer in
|
||||
default: DFW
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the resource
|
||||
choices: ['present', 'absent']
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
default: present
|
||||
username:
|
||||
description:
|
||||
- Rackspace username (overrides C(credentials))
|
||||
requirements: [ "pyrax" ]
|
||||
author: Christopher H. Laco, Matt Martz
|
||||
notes:
|
||||
- The following environment variables can be used, C(RAX_USERNAME),
|
||||
C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
|
||||
- C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
|
||||
appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
|
||||
- C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
|
||||
- C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
|
||||
extends_documentation_fragment: rackspace
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -68,22 +49,17 @@ EXAMPLES = '''
|
|||
local_action:
|
||||
module: rax_queue
|
||||
credentials: ~/.raxpub
|
||||
client_id: unique-client-name
|
||||
name: my-queue
|
||||
region: DFW
|
||||
state: present
|
||||
register: my_queue
|
||||
'''
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
HAS_PYRAX = True
|
||||
except ImportError:
|
||||
print("failed=True msg='pyrax is required for this module'")
|
||||
sys.exit(1)
|
||||
HAS_PYRAX = False
|
||||
|
||||
|
||||
def cloud_queue(module, state, name):
|
||||
|
@ -96,6 +72,10 @@ def cloud_queue(module, state, name):
|
|||
instance = {}
|
||||
|
||||
cq = pyrax.queues
|
||||
if not cq:
|
||||
module.fail_json(msg='Failed to instantiate client. This '
|
||||
'typically indicates an invalid region or an '
|
||||
'incorrectly capitalized region name.')
|
||||
|
||||
for queue in cq.list():
|
||||
if name != queue.name:
|
||||
|
@ -146,6 +126,9 @@ def main():
|
|||
required_together=rax_required_together()
|
||||
)
|
||||
|
||||
if not HAS_PYRAX:
|
||||
module.fail_json(msg='pyrax is required for this module')
|
||||
|
||||
name = module.params.get('name')
|
||||
state = module.params.get('state')
|
||||
|
||||
|
|
|
@ -60,7 +60,7 @@ options:
|
|||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
choices: [ 'db.t1.micro', 'db.m1.small', 'db.m1.medium', 'db.m1.large', 'db.m1.xlarge', 'db.m2.xlarge', 'db.m2.2xlarge', 'db.m2.4xlarge' ]
|
||||
choices: [ 'db.t1.micro', 'db.m1.small', 'db.m1.medium', 'db.m1.large', 'db.m1.xlarge', 'db.m2.xlarge', 'db.m2.2xlarge', 'db.m2.4xlarge', 'db.m3.medium', 'db.m3.large', 'db.m3.xlarge', 'db.m3.2xlarge', 'db.cr1.8xlarge' ]
|
||||
username:
|
||||
description:
|
||||
- Master database username. Used only when command=create.
|
||||
|
@ -131,7 +131,7 @@ options:
|
|||
aliases: []
|
||||
port:
|
||||
description:
|
||||
- Port number that the DB instance uses for connections. Defaults to 3306 for mysql, 1521 for Oracle, 1443 for SQL Server. Used only when command=create or command=replicate.
|
||||
- Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1443 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate.
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
|
@ -290,7 +290,7 @@ def main():
|
|||
source_instance = dict(required=False),
|
||||
db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False),
|
||||
size = dict(required=False),
|
||||
instance_type = dict(aliases=['type'], choices=['db.t1.micro', 'db.m1.small', 'db.m1.medium', 'db.m1.large', 'db.m1.xlarge', 'db.m2.xlarge', 'db.m2.2xlarge', 'db.m2.4xlarge'], required=False),
|
||||
instance_type = dict(aliases=['type'], choices=['db.t1.micro', 'db.m1.small', 'db.m1.medium', 'db.m1.large', 'db.m1.xlarge', 'db.m2.xlarge', 'db.m2.2xlarge', 'db.m2.4xlarge', 'db.m3.medium', 'db.m3.large', 'db.m3.xlarge', 'db.m3.2xlarge', 'db.cr1.8xlarge'], required=False),
|
||||
username = dict(required=False),
|
||||
password = dict(no_log=True, required=False),
|
||||
db_name = dict(required=False),
|
||||
|
@ -343,7 +343,7 @@ def main():
|
|||
maint_window = module.params.get('maint_window')
|
||||
subnet = module.params.get('subnet')
|
||||
backup_window = module.params.get('backup_window')
|
||||
backup_retention = module.params.get('module_retention')
|
||||
backup_retention = module.params.get('backup_retention')
|
||||
region = module.params.get('region')
|
||||
zone = module.params.get('zone')
|
||||
aws_secret_key = module.params.get('aws_secret_key')
|
||||
|
|
|
@ -157,7 +157,7 @@ def commit(changes):
|
|||
time.sleep(500)
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_keys_spec()
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
command = dict(choices=['get', 'create', 'delete'], required=True),
|
||||
zone = dict(required=True),
|
||||
|
@ -220,11 +220,16 @@ def main():
|
|||
found_record = False
|
||||
sets = conn.get_all_rrsets(zones[zone_in])
|
||||
for rset in sets:
|
||||
if rset.type == type_in and rset.name == record_in:
|
||||
# Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round
|
||||
# tripping of things like * and @.
|
||||
decoded_name = rset.name.replace(r'\052', '*')
|
||||
decoded_name = rset.name.replace(r'\100', '@')
|
||||
|
||||
if rset.type == type_in and decoded_name == record_in:
|
||||
found_record = True
|
||||
record['zone'] = zone_in
|
||||
record['type'] = rset.type
|
||||
record['record'] = rset.name
|
||||
record['record'] = decoded_name
|
||||
record['ttl'] = rset.ttl
|
||||
record['value'] = ','.join(sorted(rset.resource_records))
|
||||
record['values'] = sorted(rset.resource_records)
|
||||
|
|
73
cloud/s3
73
cloud/s3
|
@ -68,7 +68,7 @@ options:
|
|||
aliases: []
|
||||
s3_url:
|
||||
description:
|
||||
- S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined.
|
||||
- "S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. Ansible tries to guess if fakes3 (https://github.com/jubos/fake-s3) or Eucalyptus Walrus (https://github.com/eucalyptus/eucalyptus/wiki/Walrus) is used and configure connection accordingly. Current heuristic is: everything with scheme fakes3:// is fakes3, everything else not ending with amazonaws.com is Walrus."
|
||||
default: null
|
||||
aliases: [ S3_URL ]
|
||||
aws_secret_key:
|
||||
|
@ -83,6 +83,13 @@ options:
|
|||
required: false
|
||||
default: null
|
||||
aliases: [ 'ec2_access_key', 'access_key' ]
|
||||
metadata:
|
||||
description:
|
||||
- Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
|
||||
required: false
|
||||
default: null
|
||||
version_added: "1.6"
|
||||
|
||||
requirements: [ "boto" ]
|
||||
author: Lester Wade, Ralph Tice
|
||||
'''
|
||||
|
@ -97,7 +104,11 @@ EXAMPLES = '''
|
|||
# GET/download and do not overwrite local file (trust remote)
|
||||
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get force=false
|
||||
# PUT/upload and overwrite remote file (trust local)
|
||||
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
|
||||
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
|
||||
# PUT/upload with metadata
|
||||
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip'
|
||||
# PUT/upload with multiple metadata
|
||||
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
|
||||
# PUT/upload and do not overwrite remote file (trust local)
|
||||
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false
|
||||
# Download an object as a string to use else where in your playbook
|
||||
|
@ -134,11 +145,12 @@ def key_check(module, s3, bucket, obj):
|
|||
def keysum(module, s3, bucket, obj):
|
||||
bucket = s3.lookup(bucket)
|
||||
key_check = bucket.get_key(obj)
|
||||
if key_check:
|
||||
md5_remote = key_check.etag[1:-1]
|
||||
etag_multipart = md5_remote.find('-')!=-1 #Check for multipart, etag is not md5
|
||||
if etag_multipart is True:
|
||||
module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.")
|
||||
if not key_check:
|
||||
return None
|
||||
md5_remote = key_check.etag[1:-1]
|
||||
etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
|
||||
if etag_multipart is True:
|
||||
module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.")
|
||||
return md5_remote
|
||||
|
||||
def bucket_check(module, s3, bucket):
|
||||
|
@ -201,10 +213,14 @@ def path_check(path):
|
|||
else:
|
||||
return False
|
||||
|
||||
def upload_s3file(module, s3, bucket, obj, src, expiry):
|
||||
def upload_s3file(module, s3, bucket, obj, src, expiry, metadata):
|
||||
try:
|
||||
bucket = s3.lookup(bucket)
|
||||
key = bucket.new_key(obj)
|
||||
key = bucket.new_key(obj)
|
||||
if metadata:
|
||||
for meta_key in metadata.keys():
|
||||
key.set_metadata(meta_key, metadata[meta_key])
|
||||
|
||||
key.set_contents_from_filename(src)
|
||||
url = key.generate_url(expiry)
|
||||
module.exit_json(msg="PUT operation complete", url=url, changed=True)
|
||||
|
@ -238,6 +254,13 @@ def get_download_url(module, s3, bucket, obj, expiry, changed=True):
|
|||
except s3.provider.storage_response_error, e:
|
||||
module.fail_json(msg= str(e))
|
||||
|
||||
def is_fakes3(s3_url):
|
||||
""" Return True if s3_url has scheme fakes3:// """
|
||||
if s3_url is not None:
|
||||
return urlparse.urlparse(s3_url).scheme == 'fakes3'
|
||||
else:
|
||||
return False
|
||||
|
||||
def is_walrus(s3_url):
|
||||
""" Return True if it's Walrus endpoint, not S3
|
||||
|
||||
|
@ -249,7 +272,7 @@ def is_walrus(s3_url):
|
|||
return False
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_keys_spec()
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
bucket = dict(required=True),
|
||||
object = dict(),
|
||||
|
@ -259,7 +282,8 @@ def main():
|
|||
expiry = dict(default=600, aliases=['expiration']),
|
||||
s3_url = dict(aliases=['S3_URL']),
|
||||
overwrite = dict(aliases=['force'], default=True, type='bool'),
|
||||
)
|
||||
metadata = dict(type='dict'),
|
||||
),
|
||||
)
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
|
@ -272,6 +296,7 @@ def main():
|
|||
expiry = int(module.params['expiry'])
|
||||
s3_url = module.params.get('s3_url')
|
||||
overwrite = module.params.get('overwrite')
|
||||
metadata = module.params.get('metadata')
|
||||
|
||||
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
|
||||
|
||||
|
@ -282,8 +307,22 @@ def main():
|
|||
if not s3_url and 'S3_URL' in os.environ:
|
||||
s3_url = os.environ['S3_URL']
|
||||
|
||||
# If we have an S3_URL env var set, this is likely to be Walrus, so change connection method
|
||||
if is_walrus(s3_url):
|
||||
# Look at s3_url and tweak connection settings
|
||||
# if connecting to Walrus or fakes3
|
||||
if is_fakes3(s3_url):
|
||||
try:
|
||||
fakes3 = urlparse.urlparse(s3_url)
|
||||
from boto.s3.connection import OrdinaryCallingFormat
|
||||
s3 = boto.connect_s3(
|
||||
aws_access_key,
|
||||
aws_secret_key,
|
||||
is_secure=False,
|
||||
host=fakes3.hostname,
|
||||
port=fakes3.port,
|
||||
calling_format=OrdinaryCallingFormat())
|
||||
except boto.exception.NoAuthHandlerFound, e:
|
||||
module.fail_json(msg = str(e))
|
||||
elif is_walrus(s3_url):
|
||||
try:
|
||||
walrus = urlparse.urlparse(s3_url).hostname
|
||||
s3 = boto.connect_walrus(walrus, aws_access_key, aws_secret_key)
|
||||
|
@ -364,24 +403,24 @@ def main():
|
|||
if md5_local == md5_remote:
|
||||
sum_matches = True
|
||||
if overwrite is True:
|
||||
upload_s3file(module, s3, bucket, obj, src, expiry)
|
||||
upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
|
||||
else:
|
||||
get_download_url(module, s3, bucket, obj, expiry, changed=False)
|
||||
else:
|
||||
sum_matches = False
|
||||
if overwrite is True:
|
||||
upload_s3file(module, s3, bucket, obj, src, expiry)
|
||||
upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
|
||||
else:
|
||||
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
|
||||
|
||||
# If neither exist (based on bucket existence), we can create both.
|
||||
if bucketrtn is False and pathrtn is True:
|
||||
create_bucket(module, s3, bucket)
|
||||
upload_s3file(module, s3, bucket, obj, src, expiry)
|
||||
upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
|
||||
|
||||
# If bucket exists but key doesn't, just upload.
|
||||
if bucketrtn is True and pathrtn is True and keyrtn is False:
|
||||
upload_s3file(module, s3, bucket, obj, src, expiry)
|
||||
upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
|
||||
|
||||
# Support for deleting an object if we have both params.
|
||||
if mode == 'delete':
|
||||
|
|
51
cloud/virt
51
cloud/virt
|
@ -36,7 +36,7 @@ options:
|
|||
since these refer only to VM states. After starting a guest, it may not
|
||||
be immediately accessible.
|
||||
required: false
|
||||
choices: [ "running", "shutdown" ]
|
||||
choices: [ "running", "shutdown", "destroyed", "paused" ]
|
||||
default: "no"
|
||||
command:
|
||||
description:
|
||||
|
@ -108,18 +108,19 @@ VIRT_STATE_NAME_MAP = {
|
|||
6 : "crashed"
|
||||
}
|
||||
|
||||
class VMNotFound(Exception):
|
||||
class VMNotFound(Exception):
|
||||
pass
|
||||
|
||||
class LibvirtConnection(object):
|
||||
|
||||
def __init__(self, uri):
|
||||
def __init__(self, uri, module):
|
||||
|
||||
cmd = subprocess.Popen("uname -r", shell=True, stdout=subprocess.PIPE,
|
||||
close_fds=True)
|
||||
output = cmd.communicate()[0]
|
||||
self.module = module
|
||||
|
||||
if output.find("xen") != -1:
|
||||
cmd = "uname -r"
|
||||
rc, stdout, stderr = self.module.run_command(cmd)
|
||||
|
||||
if "xen" in stdout:
|
||||
conn = libvirt.open(None)
|
||||
else:
|
||||
conn = libvirt.open(uri)
|
||||
|
@ -196,6 +197,10 @@ class LibvirtConnection(object):
|
|||
def get_type(self):
|
||||
return self.conn.getType()
|
||||
|
||||
def get_xml(self, vmid):
|
||||
vm = self.conn.lookupByName(vmid)
|
||||
return vm.XMLDesc(0)
|
||||
|
||||
def get_maxVcpus(self, vmid):
|
||||
vm = self.conn.lookupByName(vmid)
|
||||
return vm.maxVcpus()
|
||||
|
@ -221,11 +226,12 @@ class LibvirtConnection(object):
|
|||
|
||||
class Virt(object):
|
||||
|
||||
def __init__(self, uri):
|
||||
def __init__(self, uri, module):
|
||||
self.module = module
|
||||
self.uri = uri
|
||||
|
||||
def __get_conn(self):
|
||||
self.conn = LibvirtConnection(self.uri)
|
||||
self.conn = LibvirtConnection(self.uri, self.module)
|
||||
return self.conn
|
||||
|
||||
def get_vm(self, vmid):
|
||||
|
@ -359,14 +365,8 @@ class Virt(object):
|
|||
Return an xml describing vm config returned by a libvirt call
|
||||
"""
|
||||
|
||||
conn = libvirt.openReadOnly(None)
|
||||
if not conn:
|
||||
return (-1,'Failed to open connection to the hypervisor')
|
||||
try:
|
||||
domV = conn.lookupByName(vmid)
|
||||
except:
|
||||
return (-1,'Failed to find the main domain')
|
||||
return domV.XMLDesc(0)
|
||||
self.__get_conn()
|
||||
return self.conn.get_xml(vmid)
|
||||
|
||||
def get_maxVcpus(self, vmid):
|
||||
"""
|
||||
|
@ -399,7 +399,7 @@ def core(module):
|
|||
uri = module.params.get('uri', None)
|
||||
xml = module.params.get('xml', None)
|
||||
|
||||
v = Virt(uri)
|
||||
v = Virt(uri, module)
|
||||
res = {}
|
||||
|
||||
if state and command=='list_vms':
|
||||
|
@ -414,13 +414,24 @@ def core(module):
|
|||
|
||||
res['changed'] = False
|
||||
if state == 'running':
|
||||
if v.status(guest) is not 'running':
|
||||
if v.status(guest) is 'paused':
|
||||
res['changed'] = True
|
||||
res['msg'] = v.unpause(guest)
|
||||
elif v.status(guest) is not 'running':
|
||||
res['changed'] = True
|
||||
res['msg'] = v.start(guest)
|
||||
elif state == 'shutdown':
|
||||
if v.status(guest) is not 'shutdown':
|
||||
res['changed'] = True
|
||||
res['msg'] = v.shutdown(guest)
|
||||
elif state == 'destroyed':
|
||||
if v.status(guest) is not 'shutdown':
|
||||
res['changed'] = True
|
||||
res['msg'] = v.destroy(guest)
|
||||
elif state == 'paused':
|
||||
if v.status(guest) is 'running':
|
||||
res['changed'] = True
|
||||
res['msg'] = v.pause(guest)
|
||||
else:
|
||||
module.fail_json(msg="unexpected state")
|
||||
|
||||
|
@ -459,7 +470,7 @@ def main():
|
|||
|
||||
module = AnsibleModule(argument_spec=dict(
|
||||
name = dict(aliases=['guest']),
|
||||
state = dict(choices=['running', 'shutdown']),
|
||||
state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']),
|
||||
command = dict(choices=ALL_COMMANDS),
|
||||
uri = dict(default='qemu:///system'),
|
||||
xml = dict(),
|
||||
|
|
|
@ -39,7 +39,8 @@ description:
|
|||
options:
|
||||
free_form:
|
||||
description:
|
||||
- the command module takes a free form command to run
|
||||
- the command module takes a free form command to run. There is no parameter actually named 'free form'.
|
||||
See the examples!
|
||||
required: true
|
||||
default: null
|
||||
aliases: []
|
||||
|
@ -136,7 +137,7 @@ def main():
|
|||
args = shlex.split(args)
|
||||
startd = datetime.datetime.now()
|
||||
|
||||
rc, out, err = module.run_command(args, executable=executable)
|
||||
rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell)
|
||||
|
||||
endd = datetime.datetime.now()
|
||||
delta = endd - startd
|
||||
|
@ -180,7 +181,7 @@ class CommandModule(AnsibleModule):
|
|||
params['removes'] = None
|
||||
params['shell'] = False
|
||||
params['executable'] = None
|
||||
if args.find("#USE_SHELL") != -1:
|
||||
if "#USE_SHELL" in args:
|
||||
args = args.replace("#USE_SHELL", "")
|
||||
params['shell'] = True
|
||||
|
||||
|
|
|
@ -14,7 +14,8 @@ version_added: "0.2"
|
|||
options:
|
||||
free_form:
|
||||
description:
|
||||
- The shell module takes a free form command to run
|
||||
- The shell module takes a free form command to run, as a string. There's not an actual
|
||||
option named "free form". See the examples!
|
||||
required: true
|
||||
default: null
|
||||
creates:
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
|
||||
# (c) 2012, Elliott Foster <elliott@fourkitchens.com>
|
||||
# Sponsored by Four Kitchens http://fourkitchens.com.
|
||||
# (c) 2014, Epic Games, Inc.
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
|
@ -46,6 +47,12 @@ options:
|
|||
- The port to connect to
|
||||
required: false
|
||||
default: 27017
|
||||
replica_set:
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- Replica set to connect to (automatically connects to primary for writes)
|
||||
required: false
|
||||
default: null
|
||||
database:
|
||||
description:
|
||||
- The name of the database to add/remove the user from
|
||||
|
@ -92,12 +99,17 @@ EXAMPLES = '''
|
|||
- mongodb_user: database=burgers name=ben password=12345 roles='read' state=present
|
||||
- mongodb_user: database=burgers name=jim password=12345 roles='readWrite,dbAdmin,userAdmin' state=present
|
||||
- mongodb_user: database=burgers name=joe password=12345 roles='readWriteAnyDatabase' state=present
|
||||
|
||||
# add a user to database in a replica set, the primary server is automatically discovered and written to
|
||||
- mongodb_user: database=burgers name=bob replica_set=blecher password=12345 roles='readWriteAnyDatabase' state=present
|
||||
'''
|
||||
|
||||
import ConfigParser
|
||||
from distutils.version import LooseVersion
|
||||
try:
|
||||
from pymongo.errors import ConnectionFailure
|
||||
from pymongo.errors import OperationFailure
|
||||
from pymongo import version as PyMongoVersion
|
||||
from pymongo import MongoClient
|
||||
except ImportError:
|
||||
try: # for older PyMongo 2.2
|
||||
|
@ -114,34 +126,25 @@ else:
|
|||
#
|
||||
|
||||
def user_add(module, client, db_name, user, password, roles):
|
||||
try:
|
||||
db = client[db_name]
|
||||
if roles is None:
|
||||
db.add_user(user, password, False)
|
||||
else:
|
||||
try:
|
||||
db.add_user(user, password, None, roles=roles)
|
||||
except:
|
||||
module.fail_json(msg='"problem adding user; you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param"')
|
||||
except OperationFailure:
|
||||
return False
|
||||
|
||||
return True
|
||||
db = client[db_name]
|
||||
if roles is None:
|
||||
db.add_user(user, password, False)
|
||||
else:
|
||||
try:
|
||||
db.add_user(user, password, None, roles=roles)
|
||||
except OperationFailure, e:
|
||||
err_msg = str(e)
|
||||
if LooseVersion(PyMongoVersion) <= LooseVersion('2.5'):
|
||||
err_msg = err_msg + ' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)'
|
||||
module.fail_json(msg=err_msg)
|
||||
|
||||
def user_remove(client, db_name, user):
|
||||
try:
|
||||
db = client[db_name]
|
||||
db.remove_user(user)
|
||||
except OperationFailure:
|
||||
return False
|
||||
|
||||
return True
|
||||
db = client[db_name]
|
||||
db.remove_user(user)
|
||||
|
||||
def load_mongocnf():
|
||||
config = ConfigParser.RawConfigParser()
|
||||
mongocnf = os.path.expanduser('~/.mongodb.cnf')
|
||||
if not os.path.exists(mongocnf):
|
||||
return False
|
||||
|
||||
try:
|
||||
config.readfp(open(mongocnf))
|
||||
|
@ -165,6 +168,7 @@ def main():
|
|||
login_password=dict(default=None),
|
||||
login_host=dict(default='localhost'),
|
||||
login_port=dict(default='27017'),
|
||||
replica_set=dict(default=None),
|
||||
database=dict(required=True, aliases=['db']),
|
||||
user=dict(required=True, aliases=['name']),
|
||||
password=dict(aliases=['pass']),
|
||||
|
@ -180,6 +184,7 @@ def main():
|
|||
login_password = module.params['login_password']
|
||||
login_host = module.params['login_host']
|
||||
login_port = module.params['login_port']
|
||||
replica_set = module.params['replica_set']
|
||||
db_name = module.params['database']
|
||||
user = module.params['user']
|
||||
password = module.params['password']
|
||||
|
@ -187,7 +192,20 @@ def main():
|
|||
state = module.params['state']
|
||||
|
||||
try:
|
||||
client = MongoClient(login_host, int(login_port))
|
||||
if replica_set:
|
||||
client = MongoClient(login_host, int(login_port), replicaset=replica_set)
|
||||
else:
|
||||
client = MongoClient(login_host, int(login_port))
|
||||
|
||||
# try to authenticate as a target user to check if it already exists
|
||||
try:
|
||||
client[db_name].authenticate(user, password)
|
||||
if state == 'present':
|
||||
module.exit_json(changed=False, user=user)
|
||||
except OperationFailure:
|
||||
if state == 'absent':
|
||||
module.exit_json(changed=False, user=user)
|
||||
|
||||
if login_user is None and login_password is None:
|
||||
mongocnf_creds = load_mongocnf()
|
||||
if mongocnf_creds is not False:
|
||||
|
@ -200,16 +218,22 @@ def main():
|
|||
client.admin.authenticate(login_user, login_password)
|
||||
|
||||
except ConnectionFailure, e:
|
||||
module.fail_json(msg='unable to connect to database, check login_user and login_password are correct')
|
||||
module.fail_json(msg='unable to connect to database: %s' % str(e))
|
||||
|
||||
if state == 'present':
|
||||
if password is None:
|
||||
module.fail_json(msg='password parameter required when adding a user')
|
||||
if user_add(module, client, db_name, user, password, roles) is not True:
|
||||
module.fail_json(msg='Unable to add or update user, check login_user and login_password are correct and that this user has access to the admin collection')
|
||||
|
||||
try:
|
||||
user_add(module, client, db_name, user, password, roles)
|
||||
except OperationFailure, e:
|
||||
module.fail_json(msg='Unable to add or update user: %s' % str(e))
|
||||
|
||||
elif state == 'absent':
|
||||
if user_remove(client, db_name, user) is not True:
|
||||
module.fail_json(msg='Unable to remove user, check login_user and login_password are correct and that this user has access to the admin collection')
|
||||
try:
|
||||
user_remove(client, db_name, user)
|
||||
except OperationFailure, e:
|
||||
module.fail_json(msg='Unable to remove user: %s' % str(e))
|
||||
|
||||
module.exit_json(changed=True, user=user)
|
||||
|
||||
|
|
|
@ -101,6 +101,7 @@ EXAMPLES = '''
|
|||
|
||||
import ConfigParser
|
||||
import os
|
||||
import pipes
|
||||
try:
|
||||
import MySQLdb
|
||||
except ImportError:
|
||||
|
@ -123,36 +124,36 @@ def db_delete(cursor, db):
|
|||
|
||||
def db_dump(module, host, user, password, db_name, target, port, socket=None):
|
||||
cmd = module.get_bin_path('mysqldump', True)
|
||||
cmd += " --quick --user=%s --password='%s'" %(user, password)
|
||||
cmd += " --quick --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password))
|
||||
if socket is not None:
|
||||
cmd += " --socket=%s" % socket
|
||||
cmd += " --socket=%s" % pipes.quote(socket)
|
||||
else:
|
||||
cmd += " --host=%s --port=%s" % (host, port)
|
||||
cmd += " %s" % db_name
|
||||
cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port))
|
||||
cmd += " %s" % pipes.quote(db_name)
|
||||
if os.path.splitext(target)[-1] == '.gz':
|
||||
cmd = cmd + ' | gzip > ' + target
|
||||
cmd = cmd + ' | gzip > ' + pipes.quote(target)
|
||||
elif os.path.splitext(target)[-1] == '.bz2':
|
||||
cmd = cmd + ' | bzip2 > ' + target
|
||||
cmd = cmd + ' | bzip2 > ' + pipes.quote(target)
|
||||
else:
|
||||
cmd += " > %s" % target
|
||||
rc, stdout, stderr = module.run_command(cmd)
|
||||
cmd += " > %s" % pipes.quote(target)
|
||||
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
|
||||
return rc, stdout, stderr
|
||||
|
||||
def db_import(module, host, user, password, db_name, target, port, socket=None):
|
||||
cmd = module.get_bin_path('mysql', True)
|
||||
cmd += " --user=%s --password='%s'" %(user, password)
|
||||
cmd += " --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password))
|
||||
if socket is not None:
|
||||
cmd += " --socket=%s" % socket
|
||||
cmd += " --socket=%s" % pipes.quote(socket)
|
||||
else:
|
||||
cmd += " --host=%s --port=%s" % (host, port)
|
||||
cmd += " -D %s" % db_name
|
||||
cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port))
|
||||
cmd += " -D %s" % pipes.quote(db_name)
|
||||
if os.path.splitext(target)[-1] == '.gz':
|
||||
cmd = 'gunzip < ' + target + ' | ' + cmd
|
||||
cmd = 'gunzip < ' + pipes.quote(target) + ' | ' + cmd
|
||||
elif os.path.splitext(target)[-1] == '.bz2':
|
||||
cmd = 'bunzip2 < ' + target + ' | ' + cmd
|
||||
cmd = 'bunzip2 < ' + pipes.quote(target) + ' | ' + cmd
|
||||
else:
|
||||
cmd += " < %s" % target
|
||||
rc, stdout, stderr = module.run_command(cmd)
|
||||
cmd += " < %s" % pipes.quote(target)
|
||||
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
|
||||
return rc, stdout, stderr
|
||||
|
||||
def db_create(cursor, db, encoding, collation):
|
||||
|
|
|
@ -325,7 +325,7 @@ def main():
|
|||
if master_password:
|
||||
chm.append("MASTER_PASSWORD='" + master_password + "'")
|
||||
if master_port:
|
||||
chm.append("MASTER_PORT='" + master_port + "'")
|
||||
chm.append("MASTER_PORT=" + master_port)
|
||||
if master_connect_retry:
|
||||
chm.append("MASTER_CONNECT_RETRY='" + master_connect_retry + "'")
|
||||
if master_log_file:
|
||||
|
|
|
@ -259,7 +259,7 @@ def privileges_unpack(priv):
|
|||
output = {}
|
||||
for item in priv.split('/'):
|
||||
pieces = item.split(':')
|
||||
if pieces[0].find('.') != -1:
|
||||
if '.' in pieces[0]:
|
||||
pieces[0] = pieces[0].split('.')
|
||||
for idx, piece in enumerate(pieces):
|
||||
if pieces[0][idx] != "*":
|
||||
|
|
|
@ -76,14 +76,48 @@ else:
|
|||
mysqldb_found = True
|
||||
|
||||
|
||||
def typedvalue(value):
|
||||
"""
|
||||
Convert value to number whenever possible, return same value
|
||||
otherwise.
|
||||
|
||||
>>> typedvalue('3')
|
||||
3
|
||||
>>> typedvalue('3.0')
|
||||
3.0
|
||||
>>> typedvalue('foobar')
|
||||
'foobar'
|
||||
|
||||
"""
|
||||
try:
|
||||
return int(value)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
try:
|
||||
return float(value)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
return value
|
||||
|
||||
|
||||
def getvariable(cursor, mysqlvar):
|
||||
cursor.execute("SHOW VARIABLES LIKE '" + mysqlvar + "'")
|
||||
mysqlvar_val = cursor.fetchall()
|
||||
return mysqlvar_val
|
||||
|
||||
|
||||
def setvariable(cursor, mysqlvar, value):
|
||||
""" Set a global mysql variable to a given value
|
||||
|
||||
The DB driver will handle quoting of the given value based on its
|
||||
type, thus numeric strings like '3.0' or '8' are illegal, they
|
||||
should be passed as numeric literals.
|
||||
|
||||
"""
|
||||
try:
|
||||
cursor.execute("SET GLOBAL " + mysqlvar + "=" + value)
|
||||
cursor.execute("SET GLOBAL " + mysqlvar + " = %s", (value,))
|
||||
cursor.fetchall()
|
||||
result = True
|
||||
except Exception, e:
|
||||
|
@ -203,11 +237,14 @@ def main():
|
|||
else:
|
||||
if len(mysqlvar_val) < 1:
|
||||
module.fail_json(msg="Variable not available", changed=False)
|
||||
if value == mysqlvar_val[0][1]:
|
||||
# Type values before using them
|
||||
value_wanted = typedvalue(value)
|
||||
value_actual = typedvalue(mysqlvar_val[0][1])
|
||||
if value_wanted == value_actual:
|
||||
module.exit_json(msg="Variable already set to requested value", changed=False)
|
||||
result = setvariable(cursor, mysqlvar, value)
|
||||
result = setvariable(cursor, mysqlvar, value_wanted)
|
||||
if result is True:
|
||||
module.exit_json(msg="Variable change succeeded", changed=True)
|
||||
module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True)
|
||||
else:
|
||||
module.fail_json(msg=result, changed=False)
|
||||
|
||||
|
|
|
@ -597,7 +597,8 @@ def main():
|
|||
except psycopg2.Error, e:
|
||||
conn.rollback()
|
||||
# psycopg2 errors come in connection encoding, reencode
|
||||
msg = e.message.decode(conn.encoding).encode(errors='replace')
|
||||
msg = e.message.decode(conn.encoding).encode(sys.getdefaultencoding(),
|
||||
'replace')
|
||||
module.fail_json(msg=msg)
|
||||
|
||||
if module.check_mode:
|
||||
|
|
|
@ -443,9 +443,9 @@ def main():
|
|||
priv=dict(default=None),
|
||||
db=dict(default=''),
|
||||
port=dict(default='5432'),
|
||||
fail_on_user=dict(type='bool', choices=BOOLEANS, default='yes'),
|
||||
fail_on_user=dict(type='bool', default='yes'),
|
||||
role_attr_flags=dict(default=''),
|
||||
encrypted=dict(type='bool', choices=BOOLEANS, default='no'),
|
||||
encrypted=dict(type='bool', default='no'),
|
||||
expires=dict(default=None)
|
||||
),
|
||||
supports_check_mode = True
|
||||
|
|
|
@ -22,8 +22,9 @@ module: redis
|
|||
short_description: Various redis commands, slave and flush
|
||||
description:
|
||||
- Unified utility to interact with redis instances.
|
||||
'slave' Sets a redis instance in slave or master mode.
|
||||
'flush' Flushes all the instance or a specified db.
|
||||
'slave' sets a redis instance in slave or master mode.
|
||||
'flush' flushes all the instance or a specified db.
|
||||
'config' (new in 1.6), ensures a configuration setting on an instance.
|
||||
version_added: "1.3"
|
||||
options:
|
||||
command:
|
||||
|
@ -31,7 +32,7 @@ options:
|
|||
- The selected redis command
|
||||
required: true
|
||||
default: null
|
||||
choices: [ "slave", "flush" ]
|
||||
choices: [ "slave", "flush", "config" ]
|
||||
login_password:
|
||||
description:
|
||||
- The password used to authenticate with (usually not used)
|
||||
|
@ -75,6 +76,18 @@ options:
|
|||
required: false
|
||||
default: all
|
||||
choices: [ "all", "db" ]
|
||||
name:
|
||||
version_added: 1.6
|
||||
description:
|
||||
- A redis config key.
|
||||
required: false
|
||||
default: null
|
||||
value:
|
||||
version_added: 1.6
|
||||
description:
|
||||
- A redis config value.
|
||||
required: false
|
||||
default: null
|
||||
|
||||
|
||||
notes:
|
||||
|
@ -100,6 +113,12 @@ EXAMPLES = '''
|
|||
|
||||
# Flush only one db in a redis instance
|
||||
- redis: command=flush db=1 flush_mode=db
|
||||
|
||||
# Configure local redis to have 10000 max clients
|
||||
- redis: command=config name=maxclients value=10000
|
||||
|
||||
# Configure local redis to have lua time limit of 100 ms
|
||||
- redis: command=config name=lua-time-limit value=100
|
||||
'''
|
||||
|
||||
try:
|
||||
|
@ -146,7 +165,7 @@ def flush(client, db=None):
|
|||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
command=dict(default=None, choices=['slave', 'flush']),
|
||||
command=dict(default=None, choices=['slave', 'flush', 'config']),
|
||||
login_password=dict(default=None),
|
||||
login_host=dict(default='localhost'),
|
||||
login_port=dict(default='6379'),
|
||||
|
@ -155,6 +174,8 @@ def main():
|
|||
slave_mode=dict(default='slave', choices=['master', 'slave']),
|
||||
db=dict(default=None),
|
||||
flush_mode=dict(default='all', choices=['all', 'db']),
|
||||
name=dict(default=None),
|
||||
value=dict(default=None)
|
||||
),
|
||||
supports_check_mode = True
|
||||
)
|
||||
|
@ -272,7 +293,34 @@ def main():
|
|||
module.exit_json(changed=True, flushed=True, db=db)
|
||||
else: # Flush never fails :)
|
||||
module.fail_json(msg="Unable to flush '%d' database" % db)
|
||||
elif command == 'config':
|
||||
name = module.params['name']
|
||||
value = module.params['value']
|
||||
|
||||
r = redis.StrictRedis(host=login_host,
|
||||
port=login_port,
|
||||
password=login_password)
|
||||
|
||||
try:
|
||||
r.ping()
|
||||
except Exception, e:
|
||||
module.fail_json(msg="unable to connect to database: %s" % e)
|
||||
|
||||
|
||||
try:
|
||||
old_value = r.config_get(name)[name]
|
||||
except Exception, e:
|
||||
module.fail_json(msg="unable to read config: %s" % e)
|
||||
changed = old_value != value
|
||||
|
||||
if module.check_mode or not changed:
|
||||
module.exit_json(changed=changed, name=name, value=value)
|
||||
else:
|
||||
try:
|
||||
r.config_set(name, value)
|
||||
except Exception, e:
|
||||
module.fail_json(msg="unable to write config: %s" % e)
|
||||
module.exit_json(changed=changed, name=name, value=value)
|
||||
else:
|
||||
module.fail_json(msg='A valid command must be provided')
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#!/usr/bin/env python
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2013, James Martin <jmartin@basho.com>, Drew Kerrigan <dkerrigan@basho.com>
|
||||
|
@ -73,6 +73,14 @@ options:
|
|||
default: None
|
||||
aliases: []
|
||||
choices: ['kv']
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated. This should only be used
|
||||
on personally controlled sites using self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
version_added: 1.5.1
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -97,7 +105,7 @@ except ImportError:
|
|||
|
||||
|
||||
def ring_check(module, riak_admin_bin):
|
||||
cmd = '%s ringready 2> /dev/null' % riak_admin_bin
|
||||
cmd = '%s ringready' % riak_admin_bin
|
||||
rc, out, err = module.run_command(cmd)
|
||||
if rc == 0 and 'TRUE All nodes agree on the ring' in out:
|
||||
return True
|
||||
|
@ -116,8 +124,8 @@ def main():
|
|||
wait_for_handoffs=dict(default=False, type='int'),
|
||||
wait_for_ring=dict(default=False, type='int'),
|
||||
wait_for_service=dict(
|
||||
required=False, default=None, choices=['kv'])
|
||||
)
|
||||
required=False, default=None, choices=['kv']),
|
||||
validate_certs = dict(default='yes', type='bool'))
|
||||
)
|
||||
|
||||
|
||||
|
@ -128,6 +136,7 @@ def main():
|
|||
wait_for_handoffs = module.params.get('wait_for_handoffs')
|
||||
wait_for_ring = module.params.get('wait_for_ring')
|
||||
wait_for_service = module.params.get('wait_for_service')
|
||||
validate_certs = module.params.get('validate_certs')
|
||||
|
||||
|
||||
#make sure riak commands are on the path
|
||||
|
@ -138,24 +147,13 @@ def main():
|
|||
while True:
|
||||
if time.time() > timeout:
|
||||
module.fail_json(msg='Timeout, could not fetch Riak stats.')
|
||||
try:
|
||||
if sys.version_info<(2,6,0):
|
||||
stats_raw = urllib2.urlopen(
|
||||
'http://%s/stats' % (http_conn), None).read()
|
||||
else:
|
||||
stats_raw = urllib2.urlopen(
|
||||
'http://%s/stats' % (http_conn), None, 5).read()
|
||||
(response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
|
||||
if info['status'] == 200:
|
||||
stats_raw = response.read()
|
||||
break
|
||||
except urllib2.HTTPError, e:
|
||||
time.sleep(5)
|
||||
except urllib2.URLError, e:
|
||||
time.sleep(5)
|
||||
except socket.timeout:
|
||||
time.sleep(5)
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Could not fetch Riak stats: %s' % e)
|
||||
time.sleep(5)
|
||||
|
||||
# here we attempt to load those stats,
|
||||
# here we attempt to load those stats,
|
||||
try:
|
||||
stats = json.loads(stats_raw)
|
||||
except:
|
||||
|
@ -223,7 +221,7 @@ def main():
|
|||
if wait_for_handoffs:
|
||||
timeout = time.time() + wait_for_handoffs
|
||||
while True:
|
||||
cmd = '%s transfers 2> /dev/null' % riak_admin_bin
|
||||
cmd = '%s transfers' % riak_admin_bin
|
||||
rc, out, err = module.run_command(cmd)
|
||||
if 'No transfers active' in out:
|
||||
result['handoffs'] = 'No transfers active.'
|
||||
|
@ -233,7 +231,7 @@ def main():
|
|||
module.fail_json(msg='Timeout waiting for handoffs.')
|
||||
|
||||
if wait_for_service:
|
||||
cmd = '%s wait_for_service riak_%s %s' % ( riak_admin_bin, wait_for_service, node_name)
|
||||
cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name ]
|
||||
rc, out, err = module.run_command(cmd)
|
||||
result['service'] = out
|
||||
|
||||
|
@ -252,5 +250,6 @@ def main():
|
|||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
|
||||
main()
|
||||
|
|
10
files/acl
10
files/acl
|
@ -95,7 +95,7 @@ EXAMPLES = '''
|
|||
- acl: name=/etc/foo.d entity=joe etype=user permissions=rw default=yes state=present
|
||||
|
||||
# Same as previous but using entry shorthand
|
||||
- acl: name=/etc/foo.d entrty="default:user:joe:rw-" state=present
|
||||
- acl: name=/etc/foo.d entry="default:user:joe:rw-" state=present
|
||||
|
||||
# Obtain the acl for a specific file
|
||||
- acl: name=/etc/foo.conf
|
||||
|
@ -115,6 +115,9 @@ def split_entry(entry):
|
|||
print "wtf?? %s => %s" % (entry,a)
|
||||
raise e
|
||||
|
||||
if d:
|
||||
d = True
|
||||
|
||||
if t.startswith("u"):
|
||||
t = "user"
|
||||
elif t.startswith("g"):
|
||||
|
@ -215,10 +218,10 @@ def main():
|
|||
|
||||
if state in ['present','absent']:
|
||||
if not entry and not etype:
|
||||
module.fail_json(msg="%s requries to have ither either etype and permissions or entry to be set" % state)
|
||||
module.fail_json(msg="%s requires either etype and permissions or just entry be set" % state)
|
||||
|
||||
if entry:
|
||||
if etype or entity or permissions:
|
||||
if etype or entity or permissions:
|
||||
module.fail_json(msg="entry and another incompatible field (entity, etype or permissions) are also set")
|
||||
if entry.count(":") not in [2,3]:
|
||||
module.fail_json(msg="Invalid entry: '%s', it requires 3 or 4 sections divided by ':'" % entry)
|
||||
|
@ -248,7 +251,6 @@ def main():
|
|||
if not old_permissions == permissions:
|
||||
changed = True
|
||||
break
|
||||
break
|
||||
if not matched:
|
||||
changed=True
|
||||
|
||||
|
|
|
@ -59,7 +59,7 @@ options:
|
|||
default: "no"
|
||||
delimiter:
|
||||
description:
|
||||
- A delimiter to seperate the file contents.
|
||||
- A delimiter to separate the file contents.
|
||||
version_added: "1.4"
|
||||
required: false
|
||||
default: null
|
||||
|
@ -102,19 +102,38 @@ def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None):
|
|||
tmpfd, temp_path = tempfile.mkstemp()
|
||||
tmp = os.fdopen(tmpfd,'w')
|
||||
delimit_me = False
|
||||
add_newline = False
|
||||
|
||||
for f in sorted(os.listdir(src_path)):
|
||||
if compiled_regexp and not compiled_regexp.search(f):
|
||||
continue
|
||||
fragment = "%s/%s" % (src_path, f)
|
||||
if delimit_me and delimiter:
|
||||
tmp.write(delimiter)
|
||||
# always make sure there's a newline after the
|
||||
# delimiter, so lines don't run together
|
||||
if delimiter[-1] != '\n':
|
||||
tmp.write('\n')
|
||||
if os.path.isfile(fragment):
|
||||
tmp.write(file(fragment).read())
|
||||
if not os.path.isfile(fragment):
|
||||
continue
|
||||
fragment_content = file(fragment).read()
|
||||
|
||||
# always put a newline between fragments if the previous fragment didn't end with a newline.
|
||||
if add_newline:
|
||||
tmp.write('\n')
|
||||
|
||||
# delimiters should only appear between fragments
|
||||
if delimit_me:
|
||||
if delimiter:
|
||||
# un-escape anything like newlines
|
||||
delimiter = delimiter.decode('unicode-escape')
|
||||
tmp.write(delimiter)
|
||||
# always make sure there's a newline after the
|
||||
# delimiter, so lines don't run together
|
||||
if delimiter[-1] != '\n':
|
||||
tmp.write('\n')
|
||||
|
||||
tmp.write(fragment_content)
|
||||
delimit_me = True
|
||||
if fragment_content.endswith('\n'):
|
||||
add_newline = False
|
||||
else:
|
||||
add_newline = True
|
||||
|
||||
tmp.close()
|
||||
return temp_path
|
||||
|
||||
|
|
|
@ -73,6 +73,7 @@ options:
|
|||
description:
|
||||
- The validation command to run before copying into place. The path to the file to
|
||||
validate is passed in via '%s' which must be present as in the visudo example below.
|
||||
The command is passed securely so shell features like expansion and pipes won't work.
|
||||
required: false
|
||||
default: ""
|
||||
version_added: "1.2"
|
||||
|
@ -82,10 +83,6 @@ options:
|
|||
defaults.
|
||||
required: false
|
||||
version_added: "1.5"
|
||||
others:
|
||||
description:
|
||||
- all arguments accepted by the M(file) module also work here
|
||||
required: false
|
||||
author: Michael DeHaan
|
||||
notes:
|
||||
- The "copy" module recursively copy facility does not scale to lots (>hundreds) of files.
|
||||
|
|
303
files/file
303
files/file
|
@ -33,99 +33,11 @@ DOCUMENTATION = '''
|
|||
module: file
|
||||
version_added: "historical"
|
||||
short_description: Sets attributes of files
|
||||
extends_documentation_fragment: files
|
||||
description:
|
||||
- Sets attributes of files, symlinks, and directories, or removes
|
||||
files/symlinks/directories. Many other modules support the same options as
|
||||
the M(file) module - including M(copy), M(template), and M(assemble).
|
||||
options:
|
||||
path:
|
||||
description:
|
||||
- 'path to the file being managed. Aliases: I(dest), I(name)'
|
||||
required: true
|
||||
default: []
|
||||
aliases: ['dest', 'name']
|
||||
state:
|
||||
description:
|
||||
- If C(directory), all immediate subdirectories will be created if they
|
||||
do not exist. If C(file), the file will NOT be created if it does not
|
||||
exist, see the M(copy) or M(template) module if you want that behavior.
|
||||
If C(link), the symbolic link will be created or changed. Use C(hard)
|
||||
for hardlinks. If C(absent), directories will be recursively deleted,
|
||||
and files or symlinks will be unlinked. If C(touch) (new in 1.4), an empty file will
|
||||
be created if the c(dest) does not exist, while an existing file or
|
||||
directory will receive updated file access and modification times (similar
|
||||
to the way `touch` works from the command line).
|
||||
required: false
|
||||
default: file
|
||||
choices: [ file, link, directory, hard, touch, absent ]
|
||||
mode:
|
||||
required: false
|
||||
default: null
|
||||
choices: []
|
||||
description:
|
||||
- mode the file or directory should be, such as 0644 as would be fed to I(chmod)
|
||||
owner:
|
||||
required: false
|
||||
default: null
|
||||
choices: []
|
||||
description:
|
||||
- name of the user that should own the file/directory, as would be fed to I(chown)
|
||||
group:
|
||||
required: false
|
||||
default: null
|
||||
choices: []
|
||||
description:
|
||||
- name of the group that should own the file/directory, as would be fed to I(chown)
|
||||
src:
|
||||
required: false
|
||||
default: null
|
||||
choices: []
|
||||
description:
|
||||
- path of the file to link to (applies only to C(state=link)). Will accept absolute,
|
||||
relative and nonexisting paths. Relative paths are not expanded.
|
||||
seuser:
|
||||
required: false
|
||||
default: null
|
||||
choices: []
|
||||
description:
|
||||
- user part of SELinux file context. Will default to system policy, if
|
||||
applicable. If set to C(_default), it will use the C(user) portion of the
|
||||
policy if available
|
||||
serole:
|
||||
required: false
|
||||
default: null
|
||||
choices: []
|
||||
description:
|
||||
- role part of SELinux file context, C(_default) feature works as for I(seuser).
|
||||
setype:
|
||||
required: false
|
||||
default: null
|
||||
choices: []
|
||||
description:
|
||||
- type part of SELinux file context, C(_default) feature works as for I(seuser).
|
||||
selevel:
|
||||
required: false
|
||||
default: "s0"
|
||||
choices: []
|
||||
description:
|
||||
- level part of the SELinux file context. This is the MLS/MCS attribute,
|
||||
sometimes known as the C(range). C(_default) feature works as for
|
||||
I(seuser).
|
||||
recurse:
|
||||
required: false
|
||||
default: "no"
|
||||
choices: [ "yes", "no" ]
|
||||
version_added: "1.1"
|
||||
description:
|
||||
- recursively set the specified file attributes (applies only to state=directory)
|
||||
force:
|
||||
required: false
|
||||
default: "no"
|
||||
choices: [ "yes", "no" ]
|
||||
description:
|
||||
- 'force the creation of the symlinks in two cases: the source file does
|
||||
not exist (but will appear later); the destination exists and is a file (so, we need to unlink the
|
||||
"path" file and create symlink to the "src" file in place of it).'
|
||||
notes:
|
||||
- See also M(copy), M(template), M(assemble)
|
||||
requirements: [ ]
|
||||
|
@ -135,13 +47,14 @@ author: Michael DeHaan
|
|||
EXAMPLES = '''
|
||||
- file: path=/etc/foo.conf owner=foo group=foo mode=0644
|
||||
- file: src=/file/to/link/to dest=/path/to/symlink owner=foo group=foo state=link
|
||||
- file: path=/tmp/{{ item.path }} dest={{ item.dest }} state=link
|
||||
with_items:
|
||||
- { path: 'x', dest: 'y' }
|
||||
- { path: 'z', dest: 'k' }
|
||||
'''
|
||||
|
||||
def main():
|
||||
|
||||
# FIXME: pass this around, should not use global
|
||||
global module
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
state = dict(choices=['file','directory','link','hard','touch','absent'], default=None),
|
||||
|
@ -151,6 +64,7 @@ def main():
|
|||
force = dict(required=False,default=False,type='bool'),
|
||||
diff_peek = dict(default=None),
|
||||
validate = dict(required=False, default=None),
|
||||
src = dict(required=False, default=None),
|
||||
),
|
||||
add_file_common_args=True,
|
||||
supports_check_mode=True
|
||||
|
@ -159,23 +73,27 @@ def main():
|
|||
params = module.params
|
||||
state = params['state']
|
||||
force = params['force']
|
||||
diff_peek = params['diff_peek']
|
||||
src = params['src']
|
||||
|
||||
# modify source as we later reload and pass, specially relevant when used by other modules.
|
||||
params['path'] = path = os.path.expanduser(params['path'])
|
||||
|
||||
# short-circuit for diff_peek
|
||||
if params.get('diff_peek', None) is not None:
|
||||
if diff_peek is not None:
|
||||
appears_binary = False
|
||||
try:
|
||||
f = open(path)
|
||||
b = f.read(8192)
|
||||
f.close()
|
||||
if b.find("\x00") != -1:
|
||||
if "\x00" in b:
|
||||
appears_binary = True
|
||||
except:
|
||||
pass
|
||||
module.exit_json(path=path, changed=False, appears_binary=appears_binary)
|
||||
|
||||
# Find out current state
|
||||
prev_state = 'absent'
|
||||
|
||||
if os.path.lexists(path):
|
||||
if os.path.islink(path):
|
||||
prev_state = 'link'
|
||||
|
@ -187,76 +105,60 @@ def main():
|
|||
# could be many other things, but defaulting to file
|
||||
prev_state = 'file'
|
||||
|
||||
if prev_state is not None and state is None:
|
||||
# set state to current type of file
|
||||
state = prev_state
|
||||
elif state is None:
|
||||
# set default state to file
|
||||
state = 'file'
|
||||
# state should default to file, but since that creates many conflicts,
|
||||
# default to 'current' when it exists.
|
||||
if state is None:
|
||||
if prev_state != 'absent':
|
||||
state = prev_state
|
||||
else:
|
||||
state = 'file'
|
||||
|
||||
# source is both the source of a symlink or an informational passing of the src for a template module
|
||||
# or copy module, even if this module never uses it, it is needed to key off some things
|
||||
|
||||
src = params.get('src', None)
|
||||
if src:
|
||||
if src is not None:
|
||||
src = os.path.expanduser(src)
|
||||
|
||||
if src is not None and os.path.isdir(path) and state not in ["link", "absent"]:
|
||||
if params['original_basename']:
|
||||
basename = params['original_basename']
|
||||
else:
|
||||
basename = os.path.basename(src)
|
||||
params['path'] = path = os.path.join(path, basename)
|
||||
# original_basename is used by other modules that depend on file.
|
||||
if os.path.isdir(path) and state not in ["link", "absent"]:
|
||||
if params['original_basename']:
|
||||
basename = params['original_basename']
|
||||
else:
|
||||
basename = os.path.basename(src)
|
||||
params['path'] = path = os.path.join(path, basename)
|
||||
else:
|
||||
if state in ['link','hard']:
|
||||
module.fail_json(msg='src and dest are required for creating links')
|
||||
|
||||
file_args = module.load_file_common_arguments(params)
|
||||
|
||||
if state in ['link','hard'] and (src is None or path is None):
|
||||
module.fail_json(msg='src and dest are required for creating links')
|
||||
elif path is None:
|
||||
module.fail_json(msg='path is required')
|
||||
|
||||
changed = False
|
||||
|
||||
recurse = params['recurse']
|
||||
if recurse and state != 'directory':
|
||||
module.fail_json(path=path, msg="recurse option requires state to be 'directory'")
|
||||
|
||||
if recurse and state == 'file' and prev_state == 'directory':
|
||||
state = 'directory'
|
||||
|
||||
if prev_state != 'absent' and state == 'absent':
|
||||
try:
|
||||
if prev_state == 'directory':
|
||||
if os.path.islink(path):
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True)
|
||||
os.unlink(path)
|
||||
else:
|
||||
if state == 'absent':
|
||||
if state != prev_state:
|
||||
if not module.check_mode:
|
||||
if prev_state == 'directory':
|
||||
try:
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True)
|
||||
shutil.rmtree(path, ignore_errors=False)
|
||||
except Exception, e:
|
||||
module.fail_json(msg="rmtree failed: %s" % str(e))
|
||||
else:
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True)
|
||||
os.unlink(path)
|
||||
except Exception, e:
|
||||
module.fail_json(path=path, msg=str(e))
|
||||
module.exit_json(path=path, changed=True)
|
||||
else:
|
||||
try:
|
||||
os.unlink(path)
|
||||
except Exception, e:
|
||||
module.fail_json(path=path, msg="unlinking failed: %s " % str(e))
|
||||
module.exit_json(path=path, changed=True)
|
||||
else:
|
||||
module.exit_json(path=path, changed=False)
|
||||
|
||||
if prev_state != 'absent' and prev_state != state:
|
||||
if not (force and (prev_state == 'file' or prev_state == 'hard' or prev_state == 'directory') and state == 'link') and state != 'touch':
|
||||
module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, src))
|
||||
elif state == 'file':
|
||||
if state != prev_state:
|
||||
# file is not absent and any other state is a conflict
|
||||
module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state))
|
||||
|
||||
if prev_state == 'absent' and state == 'absent':
|
||||
module.exit_json(path=path, changed=False)
|
||||
|
||||
if state == 'file':
|
||||
|
||||
if prev_state != 'file':
|
||||
module.fail_json(path=path, msg='file (%s) does not exist, use copy or template module to create' % path)
|
||||
|
||||
changed = module.set_file_attributes_if_different(file_args, changed)
|
||||
changed = module.set_fs_attributes_if_different(file_args, changed)
|
||||
module.exit_json(path=path, changed=changed)
|
||||
|
||||
elif state == 'directory':
|
||||
|
@ -266,31 +168,33 @@ def main():
|
|||
os.makedirs(path)
|
||||
changed = True
|
||||
|
||||
changed = module.set_directory_attributes_if_different(file_args, changed)
|
||||
changed = module.set_fs_attributes_if_different(file_args, changed)
|
||||
|
||||
if recurse:
|
||||
for root,dirs,files in os.walk( file_args['path'] ):
|
||||
for dir in dirs:
|
||||
dirname=os.path.join(root,dir)
|
||||
for fsobj in dirs + files:
|
||||
fsname=os.path.join(root, fsobj)
|
||||
tmp_file_args = file_args.copy()
|
||||
tmp_file_args['path']=dirname
|
||||
changed = module.set_directory_attributes_if_different(tmp_file_args, changed)
|
||||
for file in files:
|
||||
filename=os.path.join(root,file)
|
||||
tmp_file_args = file_args.copy()
|
||||
tmp_file_args['path']=filename
|
||||
changed = module.set_file_attributes_if_different(tmp_file_args, changed)
|
||||
tmp_file_args['path']=fsname
|
||||
changed = module.set_fs_attributes_if_different(tmp_file_args, changed)
|
||||
|
||||
module.exit_json(path=path, changed=changed)
|
||||
|
||||
elif state in ['link','hard']:
|
||||
|
||||
absrc = src
|
||||
if not os.path.isabs(absrc):
|
||||
absrc = os.path.normpath('%s/%s' % (os.path.dirname(path), absrc))
|
||||
|
||||
if not os.path.exists(absrc) and not force:
|
||||
module.fail_json(path=path, src=src, msg='src file does not exist, use "force=yes" if you really want to create the link: %s' % absrc)
|
||||
|
||||
if state == 'hard':
|
||||
if os.path.isabs(src):
|
||||
abs_src = src
|
||||
else:
|
||||
if not os.path.isabs(src):
|
||||
module.fail_json(msg="absolute paths are required")
|
||||
|
||||
if not os.path.exists(abs_src) and not force:
|
||||
module.fail_json(path=path, src=src, msg='src file does not exist')
|
||||
elif prev_state in ['file', 'hard', 'directory'] and not force:
|
||||
module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, src))
|
||||
|
||||
if prev_state == 'absent':
|
||||
changed = True
|
||||
|
@ -300,58 +204,63 @@ def main():
|
|||
changed = True
|
||||
elif prev_state == 'hard':
|
||||
if not (state == 'hard' and os.stat(path).st_ino == os.stat(src).st_ino):
|
||||
changed = True
|
||||
if not force:
|
||||
module.fail_json(dest=path, src=src, msg='Cannot link, different hard link exists at destination')
|
||||
changed = True
|
||||
elif prev_state == 'file':
|
||||
if not force:
|
||||
module.fail_json(dest=path, src=src, msg='Cannot link, file exists at destination')
|
||||
elif prev_state in ['file', 'directory']:
|
||||
changed = True
|
||||
elif prev_state == 'directory':
|
||||
if not force:
|
||||
module.fail_json(dest=path, src=src, msg='Cannot link, directory exists at destination')
|
||||
changed = True
|
||||
module.fail_json(dest=path, src=src, msg='Cannot link, %s exists at destination' % prev_state)
|
||||
else:
|
||||
module.fail_json(dest=path, src=src, msg='unexpected position reached')
|
||||
|
||||
if changed and not module.check_mode:
|
||||
if prev_state != 'absent':
|
||||
# try to replace atomically
|
||||
tmppath = '/'.join([os.path.dirname(path), ".%s.%s.tmp" % (os.getpid(),time.time())])
|
||||
try:
|
||||
os.unlink(path)
|
||||
if state == 'hard':
|
||||
os.link(src,tmppath)
|
||||
else:
|
||||
os.symlink(src, tmppath)
|
||||
os.rename(tmppath, path)
|
||||
except OSError, e:
|
||||
module.fail_json(path=path, msg='Error while removing existing target: %s' % str(e))
|
||||
try:
|
||||
if state == 'hard':
|
||||
os.link(src,path)
|
||||
else:
|
||||
os.symlink(src, path)
|
||||
except OSError, e:
|
||||
module.fail_json(path=path, msg='Error while linking: %s' % str(e))
|
||||
if os.path.exists(tmppath):
|
||||
os.unlink(tmppath)
|
||||
module.fail_json(path=path, msg='Error while replacing: %s' % str(e))
|
||||
else:
|
||||
try:
|
||||
if state == 'hard':
|
||||
os.link(src,path)
|
||||
else:
|
||||
os.symlink(src, path)
|
||||
except OSError, e:
|
||||
module.fail_json(path=path, msg='Error while linking: %s' % str(e))
|
||||
|
||||
changed = module.set_file_attributes_if_different(file_args, changed)
|
||||
changed = module.set_fs_attributes_if_different(file_args, changed)
|
||||
module.exit_json(dest=path, src=src, changed=changed)
|
||||
|
||||
elif state == 'touch':
|
||||
if module.check_mode:
|
||||
module.exit_json(path=path, skipped=True)
|
||||
if not module.check_mode:
|
||||
|
||||
if prev_state == 'absent':
|
||||
try:
|
||||
open(path, 'w').close()
|
||||
except OSError, e:
|
||||
module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e))
|
||||
elif prev_state in ['file', 'directory']:
|
||||
try:
|
||||
os.utime(path, None)
|
||||
except OSError, e:
|
||||
module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e))
|
||||
else:
|
||||
module.fail_json(msg='Cannot touch other than files and directories')
|
||||
|
||||
module.set_fs_attributes_if_different(file_args, True)
|
||||
|
||||
if prev_state not in ['file', 'directory', 'absent']:
|
||||
module.fail_json(msg='Cannot touch other than files and directories')
|
||||
if prev_state != 'absent':
|
||||
try:
|
||||
os.utime(path, None)
|
||||
except OSError, e:
|
||||
module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e))
|
||||
else:
|
||||
try:
|
||||
open(path, 'w').close()
|
||||
except OSError, e:
|
||||
module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e))
|
||||
module.set_file_attributes_if_different(file_args, True)
|
||||
module.exit_json(dest=path, changed=True)
|
||||
|
||||
else:
|
||||
module.fail_json(path=path, msg='unexpected position reached')
|
||||
module.fail_json(path=path, msg='unexpected position reached')
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
|
|
@ -110,7 +110,8 @@ options:
|
|||
validate:
|
||||
required: false
|
||||
description:
|
||||
- validation to run before copying into place
|
||||
- validation to run before copying into place. The command is passed
|
||||
securely so shell features like expansion and pipes won't work.
|
||||
required: false
|
||||
default: None
|
||||
version_added: "1.4"
|
||||
|
@ -137,7 +138,7 @@ EXAMPLES = r"""
|
|||
# Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs.
|
||||
- lineinfile: "dest=/etc/sudoers state=present regexp='^%wheel' line='%wheel ALL=(ALL) NOPASSWD: ALL'"
|
||||
|
||||
- lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\1Xms${xms}m\3' backrefs=yes
|
||||
- lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\\1Xms${xms}m\\3' backrefs=yes
|
||||
|
||||
# Validate a the sudoers file before saving
|
||||
- lineinfile: dest=/etc/sudoers state=present regexp='^%ADMIN ALL\=' line='%ADMIN ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s'
|
||||
|
|
160
files/replace
Normal file
160
files/replace
Normal file
|
@ -0,0 +1,160 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2013, Evan Kaufman <evan@digitalflophouse.com
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import re
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: replace
|
||||
author: Evan Kaufman
|
||||
short_description: Replace all instances of a particular string in a
|
||||
file using a back-referenced regular expression.
|
||||
description:
|
||||
- This module will replace all instances of a pattern within a file.
|
||||
- It is up to the user to maintain idempotence by ensuring that the
|
||||
same pattern would never match any replacements made.
|
||||
version_added: "1.6"
|
||||
options:
|
||||
dest:
|
||||
required: true
|
||||
aliases: [ name, destfile ]
|
||||
description:
|
||||
- The file to modify.
|
||||
regexp:
|
||||
required: true
|
||||
description:
|
||||
- The regular expression to look for in the contents of the file.
|
||||
Uses Python regular expressions; see
|
||||
U(http://docs.python.org/2/library/re.html).
|
||||
Uses multiline mode, which means C(^) and C($) match the beginning
|
||||
and end respectively of I(each line) of the file.
|
||||
replace:
|
||||
required: false
|
||||
description:
|
||||
- The string to replace regexp matches. May contain backreferences
|
||||
that will get expanded with the regexp capture groups if the regexp
|
||||
matches. If not set, matches are removed entirely.
|
||||
backup:
|
||||
required: false
|
||||
default: "no"
|
||||
choices: [ "yes", "no" ]
|
||||
description:
|
||||
- Create a backup file including the timestamp information so you can
|
||||
get the original file back if you somehow clobbered it incorrectly.
|
||||
validate:
|
||||
required: false
|
||||
description:
|
||||
- validation to run before copying into place
|
||||
required: false
|
||||
default: None
|
||||
others:
|
||||
description:
|
||||
- All arguments accepted by the M(file) module also work here.
|
||||
required: false
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
- replace: dest=/etc/hosts regexp='(\s+)old\.host\.name(\s+.*)?$' replace='\1new.host.name\2' backup=yes
|
||||
|
||||
- replace: dest=/home/jdoe/.ssh/known_hosts regexp='^old\.host\.name[^\n]*\n' owner=jdoe group=jdoe mode=644
|
||||
|
||||
- replace: dest=/etc/apache/ports regexp='^(NameVirtualHost|Listen)\s+80\s*$' replace='\1 127.0.0.1:8080' validate='/usr/sbin/apache2ctl -f %s -t'
|
||||
"""
|
||||
|
||||
def write_changes(module,contents,dest):
|
||||
|
||||
tmpfd, tmpfile = tempfile.mkstemp()
|
||||
f = os.fdopen(tmpfd,'wb')
|
||||
f.write(contents)
|
||||
f.close()
|
||||
|
||||
validate = module.params.get('validate', None)
|
||||
valid = not validate
|
||||
if validate:
|
||||
(rc, out, err) = module.run_command(validate % tmpfile)
|
||||
valid = rc == 0
|
||||
if rc != 0:
|
||||
module.fail_json(msg='failed to validate: '
|
||||
'rc:%s error:%s' % (rc,err))
|
||||
if valid:
|
||||
module.atomic_move(tmpfile, dest)
|
||||
|
||||
def check_file_attrs(module, changed, message):
|
||||
|
||||
file_args = module.load_file_common_arguments(module.params)
|
||||
if module.set_file_attributes_if_different(file_args, False):
|
||||
|
||||
if changed:
|
||||
message += " and "
|
||||
changed = True
|
||||
message += "ownership, perms or SE linux context changed"
|
||||
|
||||
return message, changed
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
dest=dict(required=True, aliases=['name', 'destfile']),
|
||||
regexp=dict(required=True),
|
||||
replace=dict(default='', type='str'),
|
||||
backup=dict(default=False, type='bool'),
|
||||
validate=dict(default=None, type='str'),
|
||||
),
|
||||
add_file_common_args=True,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
params = module.params
|
||||
dest = os.path.expanduser(params['dest'])
|
||||
|
||||
if os.path.isdir(dest):
|
||||
module.fail_json(rc=256, msg='Destination %s is a directory !' % dest)
|
||||
|
||||
if not os.path.exists(dest):
|
||||
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
|
||||
else:
|
||||
f = open(dest, 'rb')
|
||||
contents = f.read()
|
||||
f.close()
|
||||
|
||||
mre = re.compile(params['regexp'], re.MULTILINE)
|
||||
result = re.subn(mre, params['replace'], contents, 0)
|
||||
|
||||
if result[1] > 0:
|
||||
msg = '%s replacements made' % result[1]
|
||||
changed = True
|
||||
else:
|
||||
msg = ''
|
||||
changed = False
|
||||
|
||||
if changed and not module.check_mode:
|
||||
if params['backup'] and os.path.exists(dest):
|
||||
module.backup_local(dest)
|
||||
write_changes(module, result[0], dest)
|
||||
|
||||
msg, changed = check_file_attrs(module, changed, msg)
|
||||
module.exit_json(changed=changed, msg=msg)
|
||||
|
||||
# this is magic, see lib/ansible/module_common.py
|
||||
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
|
||||
|
||||
main()
|
|
@ -132,8 +132,9 @@ def main():
|
|||
if S_ISLNK(mode):
|
||||
d['lnk_source'] = os.path.realpath(path)
|
||||
|
||||
if S_ISREG(mode) and get_md5:
|
||||
d['md5'] = module.md5(path)
|
||||
if S_ISREG(mode) and get_md5 and os.access(path,os.R_OK):
|
||||
d['md5'] = module.md5(path)
|
||||
|
||||
|
||||
try:
|
||||
pw = pwd.getpwuid(st.st_uid)
|
||||
|
|
|
@ -16,8 +16,6 @@
|
|||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import subprocess
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: synchronize
|
||||
|
@ -51,6 +49,13 @@ options:
|
|||
choices: [ 'yes', 'no' ]
|
||||
default: 'yes'
|
||||
required: false
|
||||
checksum:
|
||||
description:
|
||||
- Skip based on checksum, rather than mod-time & size; Note that that "archive" option is still enabled by default - the "checksum" option will not disable it.
|
||||
choices: [ 'yes', 'no' ]
|
||||
default: 'no'
|
||||
required: false
|
||||
version_added: "1.6"
|
||||
existing_only:
|
||||
description:
|
||||
- Skip creating new files on receiver.
|
||||
|
@ -60,7 +65,7 @@ options:
|
|||
version_added: "1.5"
|
||||
delete:
|
||||
description:
|
||||
- Delete files that don't exist (after transfer, not before) in the C(src) path.
|
||||
- Delete files that don't exist (after transfer, not before) in the C(src) path. This option requires C(recursive=yes).
|
||||
choices: [ 'yes', 'no' ]
|
||||
default: 'no'
|
||||
required: false
|
||||
|
@ -121,6 +126,17 @@ options:
|
|||
- Specify a --timeout for the rsync command in seconds.
|
||||
default: 10
|
||||
required: false
|
||||
set_remote_user:
|
||||
description:
|
||||
- put user@ for the remote paths. If you have a custom ssh config to define the remote user for a host
|
||||
that does not match the inventory user, you should set this parameter to "no".
|
||||
default: yes
|
||||
rsync_opts:
|
||||
description:
|
||||
- Specify additional rsync options by passing in an array.
|
||||
default:
|
||||
required: false
|
||||
version_added: "1.6"
|
||||
notes:
|
||||
- Inspect the verbose output to validate the destination user/host/path
|
||||
are what was expected.
|
||||
|
@ -144,6 +160,9 @@ synchronize: src=some/relative/path dest=/some/absolute/path archive=no
|
|||
# Synchronization with --archive options enabled except for --recursive
|
||||
synchronize: src=some/relative/path dest=/some/absolute/path recursive=no
|
||||
|
||||
# Synchronization with --archive options enabled except for --times, with --checksum option enabled
|
||||
synchronize: src=some/relative/path dest=/some/absolute/path checksum=yes times=no
|
||||
|
||||
# Synchronization without --archive options enabled except use --links
|
||||
synchronize: src=some/relative/path dest=/some/absolute/path archive=no links=yes
|
||||
|
||||
|
@ -169,6 +188,9 @@ synchronize: src=some/relative/path dest=/some/absolute/path rsync_path="sudo rs
|
|||
- var # exclude any path whose last part is 'var'
|
||||
- /var # exclude any path starting with 'var' starting at the source directory
|
||||
+ /var/conf # include /var/conf even though it was previously excluded
|
||||
|
||||
# Synchronize passing in extra rsync options
|
||||
synchronize: src=/tmp/helloworld dest=/var/www/helloword rsync_opts=--no-motd,--exclude=.git
|
||||
'''
|
||||
|
||||
|
||||
|
@ -182,6 +204,7 @@ def main():
|
|||
private_key = dict(default=None),
|
||||
rsync_path = dict(default=None),
|
||||
archive = dict(default='yes', type='bool'),
|
||||
checksum = dict(default='no', type='bool'),
|
||||
existing_only = dict(default='no', type='bool'),
|
||||
dirs = dict(default='no', type='bool'),
|
||||
recursive = dict(type='bool'),
|
||||
|
@ -191,7 +214,9 @@ def main():
|
|||
times = dict(type='bool'),
|
||||
owner = dict(type='bool'),
|
||||
group = dict(type='bool'),
|
||||
rsync_timeout = dict(type='int', default=10)
|
||||
set_remote_user = dict(default='yes', type='bool'),
|
||||
rsync_timeout = dict(type='int', default=10),
|
||||
rsync_opts = dict(type='list')
|
||||
),
|
||||
supports_check_mode = True
|
||||
)
|
||||
|
@ -205,6 +230,7 @@ def main():
|
|||
rsync = module.params.get('local_rsync_path', 'rsync')
|
||||
rsync_timeout = module.params.get('rsync_timeout', 'rsync_timeout')
|
||||
archive = module.params['archive']
|
||||
checksum = module.params['checksum']
|
||||
existing_only = module.params['existing_only']
|
||||
dirs = module.params['dirs']
|
||||
# the default of these params depends on the value of archive
|
||||
|
@ -215,6 +241,7 @@ def main():
|
|||
times = module.params['times']
|
||||
owner = module.params['owner']
|
||||
group = module.params['group']
|
||||
rsync_opts = module.params['rsync_opts']
|
||||
|
||||
cmd = '%s --delay-updates -FF --compress --timeout=%s' % (rsync, rsync_timeout)
|
||||
if module.check_mode:
|
||||
|
@ -223,6 +250,8 @@ def main():
|
|||
cmd = cmd + ' --delete-after'
|
||||
if existing_only:
|
||||
cmd = cmd + ' --existing'
|
||||
if checksum:
|
||||
cmd = cmd + ' --checksum'
|
||||
if archive:
|
||||
cmd = cmd + ' --archive'
|
||||
if recursive is False:
|
||||
|
@ -270,8 +299,17 @@ def main():
|
|||
|
||||
if rsync_path:
|
||||
cmd = cmd + " --rsync-path '%s'" %(rsync_path)
|
||||
if rsync_opts:
|
||||
cmd = cmd + " " + " ".join(rsync_opts)
|
||||
changed_marker = '<<CHANGED>>'
|
||||
cmd = cmd + " --out-format='" + changed_marker + "%i %n%L'"
|
||||
|
||||
# expand the paths
|
||||
if '@' not in source:
|
||||
source = os.path.expanduser(source)
|
||||
if '@' not in dest:
|
||||
dest = os.path.expanduser(dest)
|
||||
|
||||
cmd = ' '.join([cmd, source, dest])
|
||||
cmdstr = cmd
|
||||
(rc, out, err) = module.run_command(cmd)
|
||||
|
@ -279,8 +317,12 @@ def main():
|
|||
return module.fail_json(msg=err, rc=rc, cmd=cmdstr)
|
||||
else:
|
||||
changed = changed_marker in out
|
||||
return module.exit_json(changed=changed, msg=out.replace(changed_marker,''),
|
||||
rc=rc, cmd=cmdstr)
|
||||
out_clean=out.replace(changed_marker,'')
|
||||
out_lines=out_clean.split('\n')
|
||||
while '' in out_lines:
|
||||
out_lines.remove('')
|
||||
return module.exit_json(changed=changed, msg=out_clean,
|
||||
rc=rc, cmd=cmdstr, stdout_lines=out_lines)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
|
|
@ -17,7 +17,7 @@ description:
|
|||
the template's machine, C(template_uid) the owner, C(template_path) the
|
||||
absolute path of the template, C(template_fullpath) is the absolute path of the
|
||||
template, and C(template_run_date) is the date that the template was rendered. Note that including
|
||||
a string that uses a date in the template will resort in the template being marked 'changed'
|
||||
a string that uses a date in the template will result in the template being marked 'changed'
|
||||
each time."
|
||||
options:
|
||||
src:
|
||||
|
@ -40,14 +40,13 @@ options:
|
|||
default: "no"
|
||||
validate:
|
||||
description:
|
||||
- validation to run before copying into place
|
||||
- The validation command to run before copying into place.
|
||||
- The path to the file to validate is passed in via '%s' which must be present as in the visudo example below.
|
||||
- validation to run before copying into place. The command is passed
|
||||
securely so shell features like expansion and pipes won't work.
|
||||
required: false
|
||||
default: ""
|
||||
version_added: "1.2"
|
||||
others:
|
||||
description:
|
||||
- all arguments accepted by the M(file) module also work here, as well as the M(copy) module (except the the 'content' parameter).
|
||||
required: false
|
||||
notes:
|
||||
- "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)."
|
||||
|
||||
|
@ -63,6 +62,6 @@ EXAMPLES = '''
|
|||
# Example from Ansible Playbooks
|
||||
- template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode=0644
|
||||
|
||||
# Copy a new "sudoers file into place, after passing validation with visudo
|
||||
- action: template src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s'
|
||||
# Copy a new "sudoers" file into place, after passing validation with visudo
|
||||
- template: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s'
|
||||
'''
|
||||
|
|
|
@ -43,7 +43,13 @@ options:
|
|||
required: false
|
||||
choices: [ "yes", "no" ]
|
||||
default: "yes"
|
||||
author: Dylan Martin
|
||||
creates:
|
||||
description:
|
||||
- a filename, when it already exists, this step will B(not) be run.
|
||||
required: no
|
||||
default: null
|
||||
version_added: "1.6"
|
||||
author: Dylan Martin
|
||||
todo:
|
||||
- detect changed/unchanged for .zip files
|
||||
- handle common unarchive args, like preserve owner/timestamp etc...
|
||||
|
@ -75,17 +81,20 @@ class ZipFile(object):
|
|||
self.src = src
|
||||
self.dest = dest
|
||||
self.module = module
|
||||
self.cmd_path = self.module.get_bin_path('unzip')
|
||||
|
||||
def is_unarchived(self):
|
||||
return dict(unarchived=False)
|
||||
|
||||
def unarchive(self):
|
||||
cmd = 'unzip -o "%s" -d "%s"' % (self.src, self.dest)
|
||||
cmd = '%s -o "%s" -d "%s"' % (self.cmd_path, self.src, self.dest)
|
||||
rc, out, err = self.module.run_command(cmd)
|
||||
return dict(cmd=cmd, rc=rc, out=out, err=err)
|
||||
|
||||
def can_handle_archive(self):
|
||||
cmd = 'unzip -l "%s"' % self.src
|
||||
if not self.cmd_path:
|
||||
return False
|
||||
cmd = '%s -l "%s"' % (self.cmd_path, self.src)
|
||||
rc, out, err = self.module.run_command(cmd)
|
||||
if rc == 0:
|
||||
return True
|
||||
|
@ -99,23 +108,26 @@ class TgzFile(object):
|
|||
self.src = src
|
||||
self.dest = dest
|
||||
self.module = module
|
||||
self.cmd_path = self.module.get_bin_path('tar')
|
||||
self.zipflag = 'z'
|
||||
|
||||
def is_unarchived(self):
|
||||
dirof = os.path.dirname(self.dest)
|
||||
destbase = os.path.basename(self.dest)
|
||||
cmd = 'tar -v -C "%s" --diff -%sf "%s"' % (self.dest, self.zipflag, self.src)
|
||||
cmd = '%s -v -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src)
|
||||
rc, out, err = self.module.run_command(cmd)
|
||||
unarchived = (rc == 0)
|
||||
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
|
||||
|
||||
def unarchive(self):
|
||||
cmd = 'tar -C "%s" -x%sf "%s"' % (self.dest, self.zipflag, self.src)
|
||||
cmd = '%s -C "%s" -x%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src)
|
||||
rc, out, err = self.module.run_command(cmd)
|
||||
return dict(cmd=cmd, rc=rc, out=out, err=err)
|
||||
|
||||
def can_handle_archive(self):
|
||||
cmd = 'tar -t%sf "%s"' % (self.zipflag, self.src)
|
||||
if not self.cmd_path:
|
||||
return False
|
||||
cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src)
|
||||
rc, out, err = self.module.run_command(cmd)
|
||||
if rc == 0:
|
||||
if len(out.splitlines(True)) > 0:
|
||||
|
@ -129,6 +141,7 @@ class TarFile(TgzFile):
|
|||
self.src = src
|
||||
self.dest = dest
|
||||
self.module = module
|
||||
self.cmd_path = self.module.get_bin_path('tar')
|
||||
self.zipflag = ''
|
||||
|
||||
|
||||
|
@ -138,6 +151,7 @@ class TarBzip(TgzFile):
|
|||
self.src = src
|
||||
self.dest = dest
|
||||
self.module = module
|
||||
self.cmd_path = self.module.get_bin_path('tar')
|
||||
self.zipflag = 'j'
|
||||
|
||||
|
||||
|
@ -147,6 +161,7 @@ class TarXz(TgzFile):
|
|||
self.src = src
|
||||
self.dest = dest
|
||||
self.module = module
|
||||
self.cmd_path = self.module.get_bin_path('tar')
|
||||
self.zipflag = 'J'
|
||||
|
||||
|
||||
|
@ -157,7 +172,7 @@ def pick_handler(src, dest, module):
|
|||
obj = handler(src, dest, module)
|
||||
if obj.can_handle_archive():
|
||||
return obj
|
||||
raise RuntimeError('Failed to find handler to unarchive "%s"' % src)
|
||||
module.fail_json(msg='Failed to find handler to unarchive. Make sure the required command to extract the file is installed.')
|
||||
|
||||
|
||||
def main():
|
||||
|
@ -168,6 +183,7 @@ def main():
|
|||
original_basename = dict(required=False), # used to handle 'dest is a directory' via template, a slight hack
|
||||
dest = dict(required=True),
|
||||
copy = dict(default=True, type='bool'),
|
||||
creates = dict(required=False),
|
||||
),
|
||||
add_file_common_args=True,
|
||||
)
|
||||
|
@ -175,6 +191,7 @@ def main():
|
|||
src = os.path.expanduser(module.params['src'])
|
||||
dest = os.path.expanduser(module.params['dest'])
|
||||
copy = module.params['copy']
|
||||
creates = module.params['creates']
|
||||
|
||||
# did tar file arrive?
|
||||
if not os.path.exists(src):
|
||||
|
@ -185,6 +202,20 @@ def main():
|
|||
if not os.access(src, os.R_OK):
|
||||
module.fail_json(msg="Source '%s' not readable" % src)
|
||||
|
||||
if creates:
|
||||
# do not run the command if the line contains creates=filename
|
||||
# and the filename already exists. This allows idempotence
|
||||
# of command executions.
|
||||
v = os.path.expanduser(creates)
|
||||
if os.path.exists(v):
|
||||
module.exit_json(
|
||||
stdout="skipped, since %s exists" % v,
|
||||
skipped=True,
|
||||
changed=False,
|
||||
stderr=False,
|
||||
rc=0
|
||||
)
|
||||
|
||||
# is dest OK to receive tar file?
|
||||
if not os.path.exists(os.path.dirname(dest)):
|
||||
module.fail_json(msg="Destination directory '%s' does not exist" % (os.path.dirname(dest)))
|
||||
|
|
|
@ -72,7 +72,7 @@ if len(sys.argv) < 3:
|
|||
})
|
||||
sys.exit(1)
|
||||
|
||||
jid = sys.argv[1]
|
||||
jid = "%s.%d" % (sys.argv[1], os.getpid())
|
||||
time_limit = sys.argv[2]
|
||||
wrapped_module = sys.argv[3]
|
||||
argsfile = sys.argv[4]
|
||||
|
|
|
@ -52,6 +52,7 @@ options:
|
|||
- erlang node name of the rabbit we wish to configure
|
||||
required: false
|
||||
default: rabbit
|
||||
version_added: "1.2"
|
||||
state:
|
||||
description:
|
||||
- Specify if user is to be added or removed
|
||||
|
|
|
@ -55,6 +55,7 @@ options:
|
|||
- erlang node name of the rabbit we wish to configure
|
||||
required: false
|
||||
default: rabbit
|
||||
version_added: "1.2"
|
||||
configure_priv:
|
||||
description:
|
||||
- Regular expression to restrict configure actions on a resource
|
||||
|
|
|
@ -39,6 +39,7 @@ options:
|
|||
- erlang node name of the rabbit we wish to configure
|
||||
required: false
|
||||
default: rabbit
|
||||
version_added: "1.2"
|
||||
tracing:
|
||||
description:
|
||||
- Enable/disable tracing for a vhost
|
||||
|
|
|
@ -51,7 +51,15 @@ options:
|
|||
description:
|
||||
- Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
|
||||
required: false
|
||||
default: https://airbrake.io/deploys
|
||||
default: "https://airbrake.io/deploys"
|
||||
version_added: "1.5"
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates for the target url will not be validated. This should only be used
|
||||
on personally controlled sites using self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
|
||||
# informational: requirements for nodes
|
||||
requirements: [ urllib, urllib2 ]
|
||||
|
@ -64,29 +72,12 @@ EXAMPLES = '''
|
|||
revision=4.2
|
||||
'''
|
||||
|
||||
HAS_URLLIB = True
|
||||
try:
|
||||
import urllib
|
||||
except ImportError:
|
||||
HAS_URLLIB = False
|
||||
|
||||
HAS_URLLIB2 = True
|
||||
try:
|
||||
import urllib2
|
||||
except ImportError:
|
||||
HAS_URLLIB2 = False
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
def main():
|
||||
|
||||
if not HAS_URLLIB:
|
||||
module.fail_json(msg="urllib is not installed")
|
||||
if not HAS_URLLIB2:
|
||||
module.fail_json(msg="urllib2 is not installed")
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
token=dict(required=True),
|
||||
|
@ -94,7 +85,8 @@ def main():
|
|||
user=dict(required=False),
|
||||
repo=dict(required=False),
|
||||
revision=dict(required=False),
|
||||
url=dict(required=False, default='https://api.airbrake.io/deploys.txt')
|
||||
url=dict(required=False, default='https://api.airbrake.io/deploys.txt'),
|
||||
validate_certs=dict(default='yes', type='bool'),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
@ -123,18 +115,16 @@ def main():
|
|||
module.exit_json(changed=True)
|
||||
|
||||
# Send the data to airbrake
|
||||
try:
|
||||
req = urllib2.Request(url, urllib.urlencode(params))
|
||||
result=urllib2.urlopen(req)
|
||||
except Exception, e:
|
||||
module.fail_json(msg="unable to update airbrake via %s?%s : %s" % (url, urllib.urlencode(params), e))
|
||||
data = urllib.urlencode(params)
|
||||
response, info = fetch_url(module, url, data=data)
|
||||
if info['status'] == 200:
|
||||
module.exit_json(changed=True)
|
||||
else:
|
||||
if result.code == 200:
|
||||
module.exit_json(changed=True)
|
||||
else:
|
||||
module.fail_json(msg="HTTP result code: %d connecting to %s" % (result.code, url))
|
||||
module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
|
||||
main()
|
||||
|
||||
|
|
|
@ -24,7 +24,6 @@ along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
|||
|
||||
import json
|
||||
import datetime
|
||||
import urllib2
|
||||
import base64
|
||||
import os
|
||||
|
||||
|
@ -59,6 +58,14 @@ options:
|
|||
description:
|
||||
- Organizations boundary API KEY
|
||||
required: true
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated. This should only be used
|
||||
on personally controlled sites using self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
version_added: 1.5.1
|
||||
|
||||
notes:
|
||||
- This module does not yet support boundary tags.
|
||||
|
@ -74,12 +81,6 @@ EXAMPLES='''
|
|||
|
||||
'''
|
||||
|
||||
try:
|
||||
import urllib2
|
||||
HAS_URLLIB2 = True
|
||||
except ImportError:
|
||||
HAS_URLLIB2 = False
|
||||
|
||||
api_host = "api.boundary.com"
|
||||
config_directory = "/etc/bprobe"
|
||||
|
||||
|
@ -101,7 +102,7 @@ def build_url(name, apiid, action, meter_id=None, cert_type=None):
|
|||
elif action == "delete":
|
||||
return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id)
|
||||
|
||||
def http_request(name, apiid, apikey, action, meter_id=None, cert_type=None):
|
||||
def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None):
|
||||
|
||||
if meter_id is None:
|
||||
url = build_url(name, apiid, action)
|
||||
|
@ -111,11 +112,11 @@ def http_request(name, apiid, apikey, action, meter_id=None, cert_type=None):
|
|||
else:
|
||||
url = build_url(name, apiid, action, meter_id, cert_type)
|
||||
|
||||
auth = auth_encode(apikey)
|
||||
request = urllib2.Request(url)
|
||||
request.add_header("Authorization", "Basic %s" % (auth))
|
||||
request.add_header("Content-Type", "application/json")
|
||||
return request
|
||||
headers = dict()
|
||||
headers["Authorization"] = "Basic %s" % auth_encode(apikey)
|
||||
headers["Content-Type"] = "application/json"
|
||||
|
||||
return fetch_url(module, url, data=data, headers=headers)
|
||||
|
||||
def create_meter(module, name, apiid, apikey):
|
||||
|
||||
|
@ -126,14 +127,10 @@ def create_meter(module, name, apiid, apikey):
|
|||
module.exit_json(status="Meter " + name + " already exists",changed=False)
|
||||
else:
|
||||
# If it doesn't exist, create it
|
||||
request = http_request(name, apiid, apikey, action="create")
|
||||
# A create request seems to need a json body with the name of the meter in it
|
||||
body = '{"name":"' + name + '"}'
|
||||
request.add_data(body)
|
||||
response, info = http_request(module, name, apiid, apikey, data=body, action="create")
|
||||
|
||||
try:
|
||||
result = urllib2.urlopen(request)
|
||||
except urllib2.URLError, e:
|
||||
if info['status'] != 200:
|
||||
module.fail_json(msg="Failed to connect to api host to create meter")
|
||||
|
||||
# If the config directory doesn't exist, create it
|
||||
|
@ -160,15 +157,13 @@ def create_meter(module, name, apiid, apikey):
|
|||
|
||||
def search_meter(module, name, apiid, apikey):
|
||||
|
||||
request = http_request(name, apiid, apikey, action="search")
|
||||
response, info = http_request(module, name, apiid, apikey, action="search")
|
||||
|
||||
try:
|
||||
result = urllib2.urlopen(request)
|
||||
except urllib2.URLError, e:
|
||||
if info['status'] != 200:
|
||||
module.fail_json("Failed to connect to api host to search for meter")
|
||||
|
||||
# Return meters
|
||||
return json.loads(result.read())
|
||||
return json.loads(response.read())
|
||||
|
||||
def get_meter_id(module, name, apiid, apikey):
|
||||
# In order to delete the meter we need its id
|
||||
|
@ -186,16 +181,9 @@ def delete_meter(module, name, apiid, apikey):
|
|||
if meter_id is None:
|
||||
return 1, "Meter does not exist, so can't delete it"
|
||||
else:
|
||||
action = "delete"
|
||||
request = http_request(name, apiid, apikey, action, meter_id)
|
||||
# See http://stackoverflow.com/questions/4511598/how-to-make-http-delete-method-using-urllib2
|
||||
# urllib2 only does GET or POST I believe, but here we need delete
|
||||
request.get_method = lambda: 'DELETE'
|
||||
|
||||
try:
|
||||
result = urllib2.urlopen(request)
|
||||
except urllib2.URLError, e:
|
||||
module.fail_json("Failed to connect to api host to delete meter")
|
||||
response, info = http_request(module, name, apiid, apikey, action, meter_id)
|
||||
if info['status'] != 200:
|
||||
module.fail_json("Failed to delete meter")
|
||||
|
||||
# Each new meter gets a new key.pem and ca.pem file, so they should be deleted
|
||||
types = ['cert', 'key']
|
||||
|
@ -214,17 +202,14 @@ def download_request(module, name, apiid, apikey, cert_type):
|
|||
|
||||
if meter_id is not None:
|
||||
action = "certificates"
|
||||
request = http_request(name, apiid, apikey, action, meter_id, cert_type)
|
||||
|
||||
try:
|
||||
result = urllib2.urlopen(request)
|
||||
except urllib2.URLError, e:
|
||||
response, info = http_request(module, name, apiid, apikey, action, meter_id, cert_type)
|
||||
if info['status'] != 200:
|
||||
module.fail_json("Failed to connect to api host to download certificate")
|
||||
|
||||
if result:
|
||||
try:
|
||||
cert_file_path = '%s/%s.pem' % (config_directory,cert_type)
|
||||
body = result.read()
|
||||
body = response.read()
|
||||
cert_file = open(cert_file_path, 'w')
|
||||
cert_file.write(body)
|
||||
cert_file.close
|
||||
|
@ -238,15 +223,13 @@ def download_request(module, name, apiid, apikey, cert_type):
|
|||
|
||||
def main():
|
||||
|
||||
if not HAS_URLLIB2:
|
||||
module.fail_json(msg="urllib2 is not installed")
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
state=dict(required=True, choices=['present', 'absent']),
|
||||
name=dict(required=False),
|
||||
apikey=dict(required=True),
|
||||
apiid=dict(required=True),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
)
|
||||
)
|
||||
|
||||
|
@ -268,5 +251,6 @@ def main():
|
|||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
main()
|
||||
|
||||
|
|
|
@ -54,6 +54,14 @@ options:
|
|||
description: ["An arbitrary string to use for aggregation."]
|
||||
required: false
|
||||
default: null
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated. This should only be used
|
||||
on personally controlled sites using self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
version_added: 1.5.1
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -67,7 +75,6 @@ datadog_event: title="Testing from ansible" text="Test!"
|
|||
'''
|
||||
|
||||
import socket
|
||||
from urllib2 import urlopen, Request, URLError
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
|
@ -90,15 +97,15 @@ def main():
|
|||
choices=['nagios', 'hudson', 'jenkins', 'user', 'my apps',
|
||||
'feed', 'chef', 'puppet', 'git', 'bitbucket', 'fabric',
|
||||
'capistrano']
|
||||
)
|
||||
),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
)
|
||||
)
|
||||
|
||||
post_event(module)
|
||||
|
||||
def post_event(module):
|
||||
uri = "https://app.datadoghq.com/api/v1/events?api_key=" + \
|
||||
module.params['api_key']
|
||||
uri = "https://app.datadoghq.com/api/v1/events?api_key=%s" % module.params['api_key']
|
||||
|
||||
body = dict(
|
||||
title=module.params['title'],
|
||||
|
@ -117,22 +124,20 @@ def post_event(module):
|
|||
|
||||
json_body = module.jsonify(body)
|
||||
headers = {"Content-Type": "application/json"}
|
||||
request = Request(uri, json_body, headers, unverifiable=True)
|
||||
|
||||
try:
|
||||
response = urlopen(request)
|
||||
(response, info) = fetch_url(module, uri, data=json_body, headers=headers)
|
||||
if info['status'] == 200:
|
||||
response_body = response.read()
|
||||
response_json = module.from_json(response_body)
|
||||
if response_json['status'] == 'ok':
|
||||
module.exit_json(changed=True)
|
||||
else:
|
||||
module.fail_json(msg=response)
|
||||
|
||||
except URLError, e:
|
||||
module.fail_json(msg="URL error: %s." % e)
|
||||
except socket.error, e:
|
||||
module.fail_json(msg="Socket error: %s to %s" % (e, uri))
|
||||
else:
|
||||
module.fail_json(**info)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
|
||||
main()
|
||||
|
|
169
monitoring/librato_annotation
Normal file
169
monitoring/librato_annotation
Normal file
|
@ -0,0 +1,169 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# (C) Seth Edwards, 2014
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
|
||||
import base64
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: librato_annotation
|
||||
short_description: create an annotation in librato
|
||||
description:
|
||||
- Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically
|
||||
version_added: "1.6"
|
||||
author: Seth Edwards
|
||||
requirements:
|
||||
- urllib2
|
||||
- base64
|
||||
options:
|
||||
user:
|
||||
description:
|
||||
- Librato account username
|
||||
required: true
|
||||
api_key:
|
||||
description:
|
||||
- Librato account api key
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- The annotation stream name
|
||||
- If the annotation stream does not exist, it will be created automatically
|
||||
required: false
|
||||
title:
|
||||
description:
|
||||
- The title of an annotation is a string and may contain spaces
|
||||
- The title should be a short, high-level summary of the annotation e.g. v45 Deployment
|
||||
required: true
|
||||
source:
|
||||
description:
|
||||
- A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population
|
||||
required: false
|
||||
description:
|
||||
description:
|
||||
- The description contains extra meta-data about a particular annotation
|
||||
- The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo!
|
||||
required: false
|
||||
start_time:
|
||||
description:
|
||||
- The unix timestamp indicating the the time at which the event referenced by this annotation started
|
||||
required: false
|
||||
end_time:
|
||||
description:
|
||||
- The unix timestamp indicating the the time at which the event referenced by this annotation ended
|
||||
- For events that have a duration, this is a useful way to annotate the duration of the event
|
||||
required: false
|
||||
links:
|
||||
description:
|
||||
- See examples
|
||||
required: true
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create a simple annotation event with a source
|
||||
- librato_annotation:
|
||||
user: user@example.com
|
||||
api_key: XXXXXXXXXXXXXXXXX
|
||||
title: 'App Config Change'
|
||||
source: 'foo.bar'
|
||||
description: 'This is a detailed description of the config change'
|
||||
|
||||
# Create an annotation that includes a link
|
||||
- librato_annotation:
|
||||
user: user@example.com
|
||||
api_key: XXXXXXXXXXXXXXXXXX
|
||||
name: 'code.deploy'
|
||||
title: 'app code deploy'
|
||||
description: 'this is a detailed description of a deployment'
|
||||
links:
|
||||
- { rel: 'example', href: 'http://www.example.com/deploy' }
|
||||
|
||||
# Create an annotation with a start_time and end_time
|
||||
- librato_annotation:
|
||||
user: user@example.com
|
||||
api_key: XXXXXXXXXXXXXXXXXX
|
||||
name: 'maintenance'
|
||||
title: 'Maintenance window'
|
||||
description: 'This is a detailed description of maintenance'
|
||||
start_time: 1395940006
|
||||
end_time: 1395954406
|
||||
'''
|
||||
|
||||
|
||||
try:
|
||||
import urllib2
|
||||
HAS_URLLIB2 = True
|
||||
except ImportError:
|
||||
HAS_URLLIB2 = False
|
||||
|
||||
def post_annotation(module):
|
||||
user = module.params['user']
|
||||
api_key = module.params['api_key']
|
||||
name = module.params['name']
|
||||
title = module.params['title']
|
||||
|
||||
url = 'https://metrics-api.librato.com/v1/annotations/%s' % name
|
||||
params = {}
|
||||
params['title'] = title
|
||||
|
||||
if module.params['source'] != None:
|
||||
params['source'] = module.params['source']
|
||||
if module.params['description'] != None:
|
||||
params['description'] = module.params['description']
|
||||
if module.params['start_time'] != None:
|
||||
params['start_time'] = module.params['start_time']
|
||||
if module.params['end_time'] != None:
|
||||
params['end_time'] = module.params['end_time']
|
||||
if module.params['links'] != None:
|
||||
params['links'] = module.params['links']
|
||||
|
||||
json_body = module.jsonify(params)
|
||||
|
||||
headers = {}
|
||||
headers['Content-Type'] = 'application/json'
|
||||
headers['Authorization'] = b"Basic " + base64.b64encode(user + b":" + api_key).strip()
|
||||
req = urllib2.Request(url, json_body, headers)
|
||||
try:
|
||||
response = urllib2.urlopen(req)
|
||||
except urllib2.HTTPError as e:
|
||||
module.fail_json(msg="Request Failed", reason=e.reason)
|
||||
response = response.read()
|
||||
module.exit_json(changed=True, annotation=response)
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
user = dict(required=True),
|
||||
api_key = dict(required=True),
|
||||
name = dict(required=False),
|
||||
title = dict(required=True),
|
||||
source = dict(required=False),
|
||||
description = dict(required=False),
|
||||
start_time = dict(required=False, default=None, type='int'),
|
||||
end_time = dict(require=False, default=None, type='int'),
|
||||
links = dict(type='list')
|
||||
)
|
||||
)
|
||||
|
||||
post_annotation(module)
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
130
monitoring/logentries
Normal file
130
monitoring/logentries
Normal file
|
@ -0,0 +1,130 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2013, Ivan Vanderbyl <ivan@app.io>
|
||||
#
|
||||
# This module is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This software is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this software. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: logentries
|
||||
author: Ivan Vanderbyl
|
||||
short_description: Module for tracking logs via logentries.com
|
||||
description:
|
||||
- Sends logs to LogEntries in realtime
|
||||
version_added: "1.6"
|
||||
options:
|
||||
path:
|
||||
description:
|
||||
- path to a log file
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- following state of the log
|
||||
choices: [ 'present', 'absent' ]
|
||||
required: false
|
||||
default: present
|
||||
notes:
|
||||
- Requires the LogEntries agent which can be installed following the instructions at logentries.com
|
||||
'''
|
||||
EXAMPLES = '''
|
||||
- logentries: path=/var/log/nginx/access.log state=present
|
||||
- logentries: path=/var/log/nginx/error.log state=absent
|
||||
'''
|
||||
|
||||
def query_log_status(module, le_path, path, state="present"):
|
||||
""" Returns whether a log is followed or not. """
|
||||
|
||||
if state == "present":
|
||||
rc, out, err = module.run_command("%s followed %s" % (le_path, path))
|
||||
if rc == 0:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def follow_log(module, le_path, logs):
|
||||
""" Follows one or more logs if not already followed. """
|
||||
|
||||
followed_count = 0
|
||||
|
||||
for log in logs:
|
||||
if query_log_status(module, le_path, log):
|
||||
continue
|
||||
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True)
|
||||
rc, out, err = module.run_command([le_path, 'follow', log])
|
||||
|
||||
if not query_log_status(module, le_path, log):
|
||||
module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip()))
|
||||
|
||||
followed_count += 1
|
||||
|
||||
if followed_count > 0:
|
||||
module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,))
|
||||
|
||||
module.exit_json(changed=False, msg="logs(s) already followed")
|
||||
|
||||
def unfollow_log(module, le_path, logs):
|
||||
""" Unfollows one or more logs if followed. """
|
||||
|
||||
removed_count = 0
|
||||
|
||||
# Using a for loop incase of error, we can report the package that failed
|
||||
for log in logs:
|
||||
# Query the log first, to see if we even need to remove.
|
||||
if not query_log_status(module, le_path, log):
|
||||
continue
|
||||
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True)
|
||||
rc, out, err = module.run_command([le_path, 'rm', log])
|
||||
|
||||
if query_log_status(module, le_path, log):
|
||||
module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip()))
|
||||
|
||||
removed_count += 1
|
||||
|
||||
if removed_count > 0:
|
||||
module.exit_json(changed=True, msg="removed %d package(s)" % removed_count)
|
||||
|
||||
module.exit_json(changed=False, msg="logs(s) already unfollowed")
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
path = dict(aliases=["name"], required=True),
|
||||
state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"])
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
le_path = module.get_bin_path('le', True, ['/usr/local/bin'])
|
||||
|
||||
p = module.params
|
||||
|
||||
# Handle multiple log files
|
||||
logs = p["path"].split(",")
|
||||
logs = filter(None, logs)
|
||||
|
||||
if p["state"] in ["present", "followed"]:
|
||||
follow_log(module, le_path, logs)
|
||||
|
||||
elif p["state"] in ["absent", "unfollowed"]:
|
||||
unfollow_log(module, le_path, logs)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
||||
main()
|
|
@ -47,6 +47,7 @@ EXAMPLES = '''
|
|||
- monit: name=httpd state=started
|
||||
'''
|
||||
|
||||
import pipes
|
||||
|
||||
def main():
|
||||
arg_spec = dict(
|
||||
|
@ -67,7 +68,7 @@ def main():
|
|||
rc, out, err = module.run_command('%s reload' % MONIT)
|
||||
module.exit_json(changed=True, name=name, state=state)
|
||||
|
||||
rc, out, err = module.run_command('%s summary | grep "Process \'%s\'"' % (MONIT, name))
|
||||
rc, out, err = module.run_command('%s summary | grep "Process \'%s\'"' % (MONIT, pipes.quote(name)), use_unsafe_shell=True)
|
||||
present = name in out
|
||||
|
||||
if not present and not state == 'present':
|
||||
|
@ -78,7 +79,7 @@ def main():
|
|||
if module.check_mode:
|
||||
module.exit_json(changed=True)
|
||||
module.run_command('%s reload' % MONIT, check_rc=True)
|
||||
rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, name))
|
||||
rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, pipes.quote(name)), use_unsafe_shell=True)
|
||||
if name in out:
|
||||
module.exit_json(changed=True, name=name, state=state)
|
||||
else:
|
||||
|
@ -86,7 +87,7 @@ def main():
|
|||
|
||||
module.exit_json(changed=False, name=name, state=state)
|
||||
|
||||
rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, name))
|
||||
rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, pipes.quote(name)), use_unsafe_shell=True)
|
||||
running = 'running' in out.lower()
|
||||
|
||||
if running and (state == 'started' or state == 'monitored'):
|
||||
|
@ -99,7 +100,7 @@ def main():
|
|||
if module.check_mode:
|
||||
module.exit_json(changed=True)
|
||||
module.run_command('%s stop %s' % (MONIT, name))
|
||||
rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, name))
|
||||
rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, pipes.quote(name)), use_unsafe_shell=True)
|
||||
if 'not monitored' in out.lower() or 'stop pending' in out.lower():
|
||||
module.exit_json(changed=True, name=name, state=state)
|
||||
module.fail_json(msg=out)
|
||||
|
@ -108,7 +109,8 @@ def main():
|
|||
if module.check_mode:
|
||||
module.exit_json(changed=True)
|
||||
module.run_command('%s unmonitor %s' % (MONIT, name))
|
||||
rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, name))
|
||||
# FIXME: DRY FOLKS!
|
||||
rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, pipes.quote(name)), use_unsafe_shell=True)
|
||||
if 'not monitored' in out.lower():
|
||||
module.exit_json(changed=True, name=name, state=state)
|
||||
module.fail_json(msg=out)
|
||||
|
|
|
@ -63,6 +63,14 @@ options:
|
|||
description:
|
||||
- The environment for this deployment
|
||||
required: false
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated. This should only be used
|
||||
on personally controlled sites using self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
version_added: 1.5.1
|
||||
|
||||
# informational: requirements for nodes
|
||||
requirements: [ urllib, urllib2 ]
|
||||
|
@ -75,29 +83,12 @@ EXAMPLES = '''
|
|||
revision=1.0
|
||||
'''
|
||||
|
||||
HAS_URLLIB = True
|
||||
try:
|
||||
import urllib
|
||||
except ImportError:
|
||||
HAS_URLLIB = False
|
||||
|
||||
HAS_URLLIB2 = True
|
||||
try:
|
||||
import urllib2
|
||||
except ImportError:
|
||||
HAS_URLLIB2 = False
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
def main():
|
||||
|
||||
if not HAS_URLLIB:
|
||||
module.fail_json(msg="urllib is not installed")
|
||||
if not HAS_URLLIB2:
|
||||
module.fail_json(msg="urllib2 is not installed")
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
token=dict(required=True),
|
||||
|
@ -109,6 +100,7 @@ def main():
|
|||
user=dict(required=False),
|
||||
appname=dict(required=False),
|
||||
environment=dict(required=False),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
@ -134,29 +126,20 @@ def main():
|
|||
module.exit_json(changed=True)
|
||||
|
||||
# Send the data to NewRelic
|
||||
try:
|
||||
req = urllib2.Request("https://rpm.newrelic.com/deployments.xml", urllib.urlencode(params))
|
||||
req.add_header('x-api-key',module.params["token"])
|
||||
result=urllib2.urlopen(req)
|
||||
# urlopen behaves differently in python 2.4 and 2.6 so we handle
|
||||
# both cases here. In python 2.4 it throws an exception if the
|
||||
# return code is anything other than a 200. In python 2.6 it
|
||||
# doesn't throw an exception for any 2xx return codes. In both
|
||||
# cases we expect newrelic should return a 201 on success. So
|
||||
# to handle both cases, both the except & else cases below are
|
||||
# effectively identical.
|
||||
except Exception, e:
|
||||
if e.code == 201:
|
||||
module.exit_json(changed=True)
|
||||
else:
|
||||
module.fail_json(msg="unable to update newrelic: %s" % e)
|
||||
url = "https://rpm.newrelic.com/deployments.xml"
|
||||
data = urllib.urlencode(params)
|
||||
headers = {
|
||||
'x-api-key': module.params["token"],
|
||||
}
|
||||
response, info = fetch_url(module, url, data=data, headers=headers)
|
||||
if info['status'] in (200, 201):
|
||||
module.exit_json(changed=True)
|
||||
else:
|
||||
if result.code == 201:
|
||||
module.exit_json(changed=True)
|
||||
else:
|
||||
module.fail_json(msg="result code: %d" % result.code)
|
||||
module.fail_json(msg="unable to update newrelic: %s" % info['msg'])
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
|
||||
main()
|
||||
|
||||
|
|
|
@ -85,6 +85,15 @@ options:
|
|||
default: Created by Ansible
|
||||
choices: []
|
||||
aliases: []
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated. This should only be used
|
||||
on personally controlled sites using self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
version_added: 1.5.1
|
||||
|
||||
notes:
|
||||
- This module does not yet have support to end maintenance windows.
|
||||
'''
|
||||
|
@ -124,9 +133,15 @@ EXAMPLES='''
|
|||
|
||||
import json
|
||||
import datetime
|
||||
import urllib2
|
||||
import base64
|
||||
|
||||
def auth_header(user, passwd, token):
|
||||
if token:
|
||||
return "Token token=%s" % token
|
||||
|
||||
auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
|
||||
return "Basic %s" % auth
|
||||
|
||||
def create_req(url, data, name, user, passwd, token):
|
||||
req = urllib2.Request(url, data)
|
||||
if token:
|
||||
|
@ -134,39 +149,42 @@ def create_req(url, data, name, user, passwd, token):
|
|||
else:
|
||||
auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
|
||||
req.add_header("Authorization", "Basic %s" % auth)
|
||||
|
||||
return req
|
||||
|
||||
def ongoing(name, user, passwd, token):
|
||||
def ongoing(module, name, user, passwd, token):
|
||||
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing"
|
||||
req = create_req(url, None, name, user, passwd, token)
|
||||
res = urllib2.urlopen(req)
|
||||
out = res.read()
|
||||
headers = {"Authorization": auth_header(user, passwd, token)}
|
||||
|
||||
return False, out
|
||||
response, info = fetch_url(module, url, headers=headers)
|
||||
if info['status'] != 200:
|
||||
module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])
|
||||
|
||||
return False, response.read()
|
||||
|
||||
|
||||
def create(name, user, passwd, token, requester_id, service, hours, minutes, desc):
|
||||
|
||||
def create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc):
|
||||
now = datetime.datetime.utcnow()
|
||||
later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
|
||||
start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
|
||||
end = later.strftime("%Y-%m-%dT%H:%M:%SZ")
|
||||
|
||||
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows"
|
||||
auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
|
||||
headers = {
|
||||
'Authorization': auth_header(user, passwd, token),
|
||||
'Content-Type' : 'application/json',
|
||||
}
|
||||
request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': [service]}}
|
||||
if requester_id:
|
||||
request_data['requester_id'] = requester_id
|
||||
|
||||
data = json.dumps(request_data)
|
||||
|
||||
req = create_req(url, data, name, user, passwd, token)
|
||||
req.add_header('Content-Type', 'application/json')
|
||||
response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
|
||||
if info['status'] != 200:
|
||||
module.fail_json(msg="failed to create the window: %s" % info['msg'])
|
||||
|
||||
res = urllib2.urlopen(req)
|
||||
out = res.read()
|
||||
|
||||
return False, out
|
||||
return False, response.read()
|
||||
|
||||
|
||||
def main():
|
||||
|
@ -182,7 +200,8 @@ def main():
|
|||
requester_id=dict(required=False),
|
||||
hours=dict(default='1', required=False),
|
||||
minutes=dict(default='0', required=False),
|
||||
desc=dict(default='Created by Ansible', required=False)
|
||||
desc=dict(default='Created by Ansible', required=False),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
)
|
||||
)
|
||||
|
||||
|
@ -204,10 +223,10 @@ def main():
|
|||
if state == "running" or state == "started":
|
||||
if not service:
|
||||
module.fail_json(msg="service not specified")
|
||||
(rc, out) = create(name, user, passwd, token, requester_id, service, hours, minutes, desc)
|
||||
(rc, out) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc)
|
||||
|
||||
if state == "ongoing":
|
||||
(rc, out) = ongoing(name, user, passwd, token)
|
||||
(rc, out) = ongoing(module, name, user, passwd, token)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="failed", result=out)
|
||||
|
@ -216,4 +235,6 @@ def main():
|
|||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
|
||||
main()
|
||||
|
|
133
monitoring/rollbar_deployment
Normal file
133
monitoring/rollbar_deployment
Normal file
|
@ -0,0 +1,133 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2014, Max Riveiro, <kavu13@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rollbar_deployment
|
||||
version_added: 1.6
|
||||
author: Max Riveiro
|
||||
short_description: Notify Rollbar about app deployments
|
||||
description:
|
||||
- Notify Rollbar about app deployments
|
||||
(see https://rollbar.com/docs/deploys_other/)
|
||||
options:
|
||||
token:
|
||||
description:
|
||||
- Your project access token.
|
||||
required: true
|
||||
environment:
|
||||
description:
|
||||
- Name of the environment being deployed, e.g. 'production'.
|
||||
required: true
|
||||
revision:
|
||||
description:
|
||||
- Revision number/sha being deployed.
|
||||
required: true
|
||||
user:
|
||||
description:
|
||||
- User who deployed.
|
||||
required: false
|
||||
rollbar_user:
|
||||
description:
|
||||
- Rollbar username of the user who deployed.
|
||||
required: false
|
||||
comment:
|
||||
description:
|
||||
- Deploy comment (e.g. what is being deployed).
|
||||
required: false
|
||||
url:
|
||||
description:
|
||||
- Optional URL to submit the notification to.
|
||||
required: false
|
||||
default: 'https://api.rollbar.com/api/1/deploy/'
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates for the target url will not be validated.
|
||||
This should only be used on personally controlled sites using
|
||||
self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- rollbar_deployment: token=AAAAAA
|
||||
environment='staging'
|
||||
user='ansible'
|
||||
revision=4.2
|
||||
rollbar_user='admin'
|
||||
comment='Test Deploy'
|
||||
'''
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
token=dict(required=True),
|
||||
environment=dict(required=True),
|
||||
revision=dict(required=True),
|
||||
user=dict(required=False),
|
||||
rollbar_user=dict(required=False),
|
||||
comment=dict(required=False),
|
||||
url=dict(
|
||||
required=False,
|
||||
default='https://api.rollbar.com/api/1/deploy/'
|
||||
),
|
||||
validate_certs=dict(default='yes', type='bool'),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True)
|
||||
|
||||
params = dict(
|
||||
access_token=module.params['token'],
|
||||
environment=module.params['environment'],
|
||||
revision=module.params['revision']
|
||||
)
|
||||
|
||||
if module.params['user']:
|
||||
params['local_username'] = module.params['user']
|
||||
|
||||
if module.params['rollbar_user']:
|
||||
params['rollbar_username'] = module.params['rollbar_user']
|
||||
|
||||
if module.params['comment']:
|
||||
params['comment'] = module.params['comment']
|
||||
|
||||
url = module.params.get('url')
|
||||
|
||||
try:
|
||||
data = urllib.urlencode(params)
|
||||
response, info = fetch_url(module, url, data=data)
|
||||
except Exception, e:
|
||||
module.fail_json(msg='Unable to notify Rollbar: %s' % e)
|
||||
else:
|
||||
if info['status'] == 200:
|
||||
module.exit_json(changed=True)
|
||||
else:
|
||||
module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url))
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
|
||||
main()
|
1670
net_infrastructure/bigip_facts
Normal file
1670
net_infrastructure/bigip_facts
Normal file
File diff suppressed because it is too large
Load diff
302
net_infrastructure/dnsimple
Executable file
302
net_infrastructure/dnsimple
Executable file
|
@ -0,0 +1,302 @@
|
|||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: dnsimple
|
||||
version_added: "1.6"
|
||||
short_description: Interface with dnsimple.com (a DNS hosting service).
|
||||
description:
|
||||
- "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)"
|
||||
options:
|
||||
account_email:
|
||||
description:
|
||||
- "Account email. If ommitted, the env variables DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN will be looked for. If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)"
|
||||
required: false
|
||||
default: null
|
||||
|
||||
account_api_token:
|
||||
description:
|
||||
- Account API token. See I(account_email) for info.
|
||||
required: false
|
||||
default: null
|
||||
|
||||
domain:
|
||||
description:
|
||||
- Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. If ommitted, a list of domains will be returned.
|
||||
- If domain is present but the domain doesn't exist, it will be created.
|
||||
required: false
|
||||
default: null
|
||||
|
||||
record:
|
||||
description:
|
||||
- Record to add, if blank a record for the domain will be created, supports the wildcard (*)
|
||||
required: false
|
||||
default: null
|
||||
|
||||
record_ids:
|
||||
description:
|
||||
- List of records to ensure they either exist or don't exist
|
||||
required: false
|
||||
default: null
|
||||
|
||||
type:
|
||||
description:
|
||||
- The type of DNS record to create
|
||||
required: false
|
||||
choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ]
|
||||
default: null
|
||||
|
||||
ttl:
|
||||
description:
|
||||
- The TTL to give the new record
|
||||
required: false
|
||||
default: 3600 (one hour)
|
||||
|
||||
value:
|
||||
description:
|
||||
- Record value
|
||||
- "Must be specified when trying to ensure a record exists"
|
||||
required: false
|
||||
default: null
|
||||
|
||||
priority:
|
||||
description:
|
||||
- Record priority
|
||||
required: false
|
||||
default: null
|
||||
|
||||
state:
|
||||
description:
|
||||
- whether the record should exist or not
|
||||
required: false
|
||||
choices: [ 'present', 'absent' ]
|
||||
default: null
|
||||
|
||||
solo:
|
||||
description:
|
||||
- Whether the record should be the only one for that record type and record name. Only use with state=present on a record
|
||||
required: false
|
||||
default: null
|
||||
|
||||
requirements: [ dnsimple ]
|
||||
author: Alex Coomans
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# authenicate using email and API token
|
||||
- local_action: dnsimple account_email=test@example.com account_api_token=dummyapitoken
|
||||
|
||||
# fetch all domains
|
||||
- local_action: dnsimple
|
||||
register: domains
|
||||
|
||||
# fetch my.com domain records
|
||||
- local_action: dnsimple domain=my.com state=present
|
||||
register: records
|
||||
|
||||
# delete a domain
|
||||
- local_action: dnsimple domain=my.com state=absent
|
||||
|
||||
# create a test.my.com A record to point to 127.0.0.01
|
||||
- local_action: dnsimple domain=my.com record=test type=A value=127.0.0.1
|
||||
register: record
|
||||
|
||||
# and then delete it
|
||||
- local_action: dnsimple domain=my.com record_ids={{ record['id'] }}
|
||||
|
||||
# create a my.com CNAME record to example.com
|
||||
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=present
|
||||
|
||||
# change its TTL
|
||||
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com ttl=600 state=present
|
||||
|
||||
# and delete the record
|
||||
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=absent
|
||||
|
||||
'''
|
||||
|
||||
import os
|
||||
try:
|
||||
from dnsimple import DNSimple
|
||||
from dnsimple.dnsimple import DNSimpleException
|
||||
except ImportError:
|
||||
print "failed=True msg='dnsimple required for this module'"
|
||||
sys.exit(1)
|
||||
|
||||
def main():
    """Entry point for the dnsimple module.

    Dispatches on which options were supplied:
      * no domain            -> list all domains in the account
      * domain only          -> create/delete the domain itself
      * domain + record      -> ensure a single record exists/is absent
      * domain + record_ids  -> ensure the listed record IDs all exist / are all gone

    Every successful path calls module.exit_json() (which terminates the
    module), so falling off the end means no branch matched.
    """
    module = AnsibleModule(
        argument_spec = dict(
            account_email = dict(required=False),
            account_api_token = dict(required=False, no_log=True),
            domain = dict(required=False),
            record = dict(required=False),
            record_ids = dict(required=False, type='list'),
            type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']),
            ttl = dict(required=False, default=3600, type='int'),
            value = dict(required=False),
            priority = dict(required=False, type='int'),
            state = dict(required=False, choices=['present', 'absent']),
            solo = dict(required=False, type='bool'),
        ),
        required_together = (
            ['record', 'value']
        ),
        supports_check_mode = True,
    )

    account_email = module.params.get('account_email')
    account_api_token = module.params.get('account_api_token')
    domain = module.params.get('domain')
    record = module.params.get('record')
    record_ids = module.params.get('record_ids')
    record_type = module.params.get('type')
    ttl = module.params.get('ttl')
    value = module.params.get('value')
    priority = module.params.get('priority')
    state = module.params.get('state')
    is_solo = module.params.get('solo')

    # Credential resolution order: explicit module options, then the
    # DNSIMPLE_EMAIL / DNSIMPLE_API_TOKEN environment variables, and
    # finally DNSimple()'s own default (a ~/.dnsimple file, per the
    # dnsimple-python docs referenced in DOCUMENTATION).
    if account_email and account_api_token:
        client = DNSimple(email=account_email, api_token=account_api_token)
    elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'):
        client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN'))
    else:
        client = DNSimple()

    try:
        # Let's figure out what operation we want to do

        # No domain, return a list
        if not domain:
            domains = client.domains()
            module.exit_json(changed=False, result=[d['domain'] for d in domains])

        # Domain & No record
        if domain and record is None and not record_ids:
            domains = [d['domain'] for d in client.domains()]
            # A purely numeric `domain` is treated as a DNSimple domain ID,
            # otherwise it is matched against the domain name.
            if domain.isdigit():
                dr = next((d for d in domains if d['id'] == int(domain)), None)
            else:
                dr = next((d for d in domains if d['name'] == domain), None)
            if state == 'present':
                if dr:
                    module.exit_json(changed=False, result=dr)
                else:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    else:
                        module.exit_json(changed=True, result=client.add_domain(domain)['domain'])
            elif state == 'absent':
                if dr:
                    if not module.check_mode:
                        client.delete(domain)
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)
            else:
                module.fail_json(msg="'%s' is an unknown value for the state argument" % state)

        # need the not none check since record could be an empty string
        if domain and record is not None:
            records = [r['record'] for r in client.records(str(domain))]

            if not record_type:
                module.fail_json(msg="Missing the record type")

            if not value:
                module.fail_json(msg="Missing the record value")

            # An existing record counts as a match only when name, type AND
            # content are all identical.
            rr = next((r for r in records if r['name'] == record and r['record_type'] == record_type and r['content'] == value), None)

            if state == 'present':
                changed = False
                if is_solo:
                    # delete any records that have the same name and record type
                    same_type = [r['id'] for r in records if r['name'] == record and r['record_type'] == record_type]
                    if rr:
                        # Keep the matching record itself; only siblings go.
                        same_type = [rid for rid in same_type if rid != rr['id']]
                    if same_type:
                        if not module.check_mode:
                            for rid in same_type:
                                client.delete_record(str(domain), rid)
                        changed = True
                if rr:
                    # check if we need to update
                    if rr['ttl'] != ttl or rr['prio'] != priority:
                        data = {}
                        if ttl: data['ttl'] = ttl
                        if priority: data['prio'] = priority
                        if module.check_mode:
                            module.exit_json(changed=True)
                        else:
                            module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record'])
                    else:
                        module.exit_json(changed=changed, result=rr)
                else:
                    # create it
                    data = {
                        'name': record,
                        'record_type': record_type,
                        'content': value,
                    }
                    if ttl: data['ttl'] = ttl
                    if priority: data['prio'] = priority
                    if module.check_mode:
                        module.exit_json(changed=True)
                    else:
                        module.exit_json(changed=True, result=client.add_record(str(domain), data)['record'])
            elif state == 'absent':
                if rr:
                    if not module.check_mode:
                        client.delete_record(str(domain), rr['id'])
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)
            else:
                module.fail_json(msg="'%s' is an unknown value for the state argument" % state)

        # Make sure these record_ids either all exist or none
        if domain and record_ids:
            # Compare as strings so numeric IDs and string IDs match up.
            current_records = [str(r['record']['id']) for r in client.records(str(domain))]
            wanted_records = [str(r) for r in record_ids]
            if state == 'present':
                difference = list(set(wanted_records) - set(current_records))
                if difference:
                    module.fail_json(msg="Missing the following records: %s" % difference)
                else:
                    module.exit_json(changed=False)
            elif state == 'absent':
                difference = list(set(wanted_records) & set(current_records))
                if difference:
                    if not module.check_mode:
                        for rid in difference:
                            client.delete_record(str(domain), rid)
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)
            else:
                module.fail_json(msg="'%s' is an unknown value for the state argument" % state)

    except DNSimpleException, e:
        module.fail_json(msg="Unable to contact DNSimple: %s" % e.message)

    # Reached only when no option combination above matched.
    module.fail_json(msg="Unknown what you wanted me to do")
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
||||
main()
|
|
@ -73,6 +73,15 @@ options:
|
|||
choices: [ 'present', 'absent' ]
|
||||
default: null
|
||||
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated. This should only be used
|
||||
on personally controlled sites using self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
version_added: 1.5.1
|
||||
|
||||
notes:
|
||||
- The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP.
|
||||
- This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks.
|
||||
|
@ -106,8 +115,6 @@ EXAMPLES = '''
|
|||
|
||||
IMPORT_ERROR = None
|
||||
try:
|
||||
import urllib
|
||||
import urllib2
|
||||
import json
|
||||
from time import strftime, gmtime
|
||||
import hashlib
|
||||
|
@ -115,22 +122,6 @@ try:
|
|||
except ImportError, e:
|
||||
IMPORT_ERROR = str(e)
|
||||
|
||||
|
||||
class RequestWithMethod(urllib2.Request):
|
||||
|
||||
"""Workaround for using DELETE/PUT/etc with urllib2"""
|
||||
|
||||
def __init__(self, url, method, data=None, headers={}):
|
||||
self._method = method
|
||||
urllib2.Request.__init__(self, url, data, headers)
|
||||
|
||||
def get_method(self):
|
||||
if self._method:
|
||||
return self._method
|
||||
else:
|
||||
return urllib2.Request.get_method(self)
|
||||
|
||||
|
||||
class DME2:
|
||||
|
||||
def __init__(self, apikey, secret, domain, module):
|
||||
|
@ -138,7 +129,7 @@ class DME2:
|
|||
|
||||
self.api = apikey
|
||||
self.secret = secret
|
||||
self.baseurl = 'http://api.dnsmadeeasy.com/V2.0/'
|
||||
self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/'
|
||||
self.domain = str(domain)
|
||||
self.domain_map = None # ["domain_name"] => ID
|
||||
self.record_map = None # ["record_name"] => ID
|
||||
|
@ -169,21 +160,15 @@ class DME2:
|
|||
url = self.baseurl + resource
|
||||
if data and not isinstance(data, basestring):
|
||||
data = urllib.urlencode(data)
|
||||
request = RequestWithMethod(url, method, data, self._headers())
|
||||
|
||||
try:
|
||||
response = urllib2.urlopen(request)
|
||||
except urllib2.HTTPError, e:
|
||||
self.module.fail_json(
|
||||
msg="%s returned %s, with body: %s" % (url, e.code, e.read()))
|
||||
except Exception, e:
|
||||
self.module.fail_json(
|
||||
msg="Failed contacting: %s : Exception %s" % (url, e.message()))
|
||||
response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers())
|
||||
if info['status'] not in (200, 201, 204):
|
||||
self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))
|
||||
|
||||
try:
|
||||
return json.load(response)
|
||||
except Exception, e:
|
||||
return False
|
||||
return {}
|
||||
|
||||
def getDomain(self, domain_id):
|
||||
if not self.domain_map:
|
||||
|
@ -263,6 +248,7 @@ def main():
|
|||
'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
|
||||
record_value=dict(required=False),
|
||||
record_ttl=dict(required=False, default=1800, type='int'),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
),
|
||||
required_together=(
|
||||
['record_value', 'record_ttl', 'record_type']
|
||||
|
@ -282,7 +268,7 @@ def main():
|
|||
domain_records = DME.getRecords()
|
||||
if not domain_records:
|
||||
module.fail_json(
|
||||
msg="The %s domain name is not accessible with this api_key; try using its ID if known." % domain)
|
||||
msg="The requested domain name is not accessible with this api_key; try using its ID if known.")
|
||||
module.exit_json(changed=False, result=domain_records)
|
||||
|
||||
# Fetch existing record + Build new one
|
||||
|
@ -338,4 +324,6 @@ def main():
|
|||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
|
||||
main()
|
||||
|
|
83
net_infrastructure/lldp
Executable file
83
net_infrastructure/lldp
Executable file
|
@ -0,0 +1,83 @@
|
|||
#!/usr/bin/python -tt
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import subprocess
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: lldp
|
||||
version_added: 1.6
|
||||
short_description: get details reported by lldp
|
||||
description:
|
||||
- Reads data out of lldpctl
|
||||
options: {}
|
||||
author: Andy Hill
|
||||
notes:
|
||||
- Requires lldpd running and lldp enabled on switches
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Retrieve switch/port information
|
||||
- name: Gather information from lldp
|
||||
lldp:
|
||||
|
||||
- name: Print each switch/port
|
||||
debug: msg="{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifalias'] }}
|
||||
with_items: lldp.keys()
|
||||
|
||||
# TASK: [Print each switch/port] ***********************************************************
|
||||
# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
|
||||
# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
|
||||
# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
|
||||
|
||||
'''
|
||||
|
||||
def gather_lldp(output=None):
    """Collect LLDP neighbour information and return it as a nested dict.

    Runs ``lldpctl -f keyvalue`` (unless *output* is supplied) and expands
    its dotted key/value lines, e.g. ``lldp.eth0.chassis.name=sw1``, into a
    tree of nested dictionaries: ``{'lldp': {'eth0': {'chassis': {'name':
    'sw1'}}}}``.

    :param output: optional raw keyvalue text to parse instead of invoking
                   lldpctl — a backward-compatible addition, mainly useful
                   for testing.  When None (the default) lldpctl is run.
    :returns: the nested dict, or None when there was no output (e.g.
              lldpctl is missing or produced nothing) — callers rely on
              the None return to detect failure.
    """
    if output is None:
        cmd = ['lldpctl', '-f', 'keyvalue']
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        (output, err) = proc.communicate()
    if output:
        output_dict = {}
        for entry in output.split("\n"):
            if not entry:
                continue
            # "a.b.c=value": split off the value, then walk/create the
            # nested dicts for "a" and "b" and store value under "c".
            path, value = entry.strip().split("=", 1)
            path = path.split(".")
            path_components, final = path[:-1], path[-1]

            current_dict = output_dict
            for path_component in path_components:
                current_dict = current_dict.setdefault(path_component, {})
            current_dict[final] = value
        return output_dict
|
||||
|
||||
|
||||
def main():
    """Expose the parsed lldpctl output as the ``lldp`` Ansible fact."""
    module = AnsibleModule({})

    lldp_output = gather_lldp()
    try:
        facts = {'lldp': lldp_output['lldp']}
    except TypeError:
        # gather_lldp() returns None when lldpctl produced no output.
        module.fail_json(msg="lldpctl command failed. is lldpd running?")
    else:
        module.exit_json(ansible_facts=facts)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
main()
|
||||
|
|
@ -73,6 +73,14 @@ options:
|
|||
default: server
|
||||
choices: ["server", "service"]
|
||||
aliases: []
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates for the target url will not be validated. This should only be used
|
||||
on personally controlled sites using self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
|
||||
requirements: [ "urllib", "urllib2" ]
|
||||
author: Nandor Sivok
|
||||
'''
|
||||
|
@ -90,8 +98,6 @@ ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=api
|
|||
|
||||
|
||||
import json
|
||||
import urllib
|
||||
import urllib2
|
||||
import base64
|
||||
import socket
|
||||
|
||||
|
@ -100,23 +106,25 @@ class netscaler(object):
|
|||
|
||||
_nitro_base_url = '/nitro/v1/'
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
|
||||
def http_request(self, api_endpoint, data_json={}):
|
||||
request_url = self._nsc_protocol + '://' + self._nsc_host + self._nitro_base_url + api_endpoint
|
||||
|
||||
data_json = urllib.urlencode(data_json)
|
||||
if not len(data_json):
|
||||
data_json = None
|
||||
|
||||
if len(data_json):
|
||||
req = urllib2.Request(request_url, data_json)
|
||||
req.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
||||
else:
|
||||
req = urllib2.Request(request_url)
|
||||
auth = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip()
|
||||
headers = {
|
||||
'Authorization': 'Basic %s' % auth,
|
||||
'Content-Type' : 'application/x-www-form-urlencoded',
|
||||
}
|
||||
|
||||
base64string = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip()
|
||||
req.add_header('Authorization', "Basic %s" % base64string)
|
||||
response, info = fetch_url(self.module, request_url, data=data_json)
|
||||
|
||||
resp = urllib2.urlopen(req)
|
||||
resp = json.load(resp)
|
||||
|
||||
return resp
|
||||
return json.load(response.read())
|
||||
|
||||
def prepare_request(self, action):
|
||||
resp = self.http_request(
|
||||
|
@ -134,7 +142,7 @@ class netscaler(object):
|
|||
|
||||
|
||||
def core(module):
|
||||
n = netscaler()
|
||||
n = netscaler(module)
|
||||
n._nsc_host = module.params.get('nsc_host')
|
||||
n._nsc_user = module.params.get('user')
|
||||
n._nsc_pass = module.params.get('password')
|
||||
|
@ -158,7 +166,8 @@ def main():
|
|||
password = dict(required=True),
|
||||
action = dict(default='enable', choices=['enable','disable']),
|
||||
name = dict(default=socket.gethostname()),
|
||||
type = dict(default='server', choices=['service', 'server'])
|
||||
type = dict(default='server', choices=['service', 'server']),
|
||||
validate_certs=dict(default='yes', type='bool'),
|
||||
)
|
||||
)
|
||||
|
||||
|
@ -177,4 +186,5 @@ def main():
|
|||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
main()
|
||||
|
|
112
network/get_url
112
network/get_url
|
@ -83,6 +83,13 @@ options:
|
|||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated. This should only be used
|
||||
on personally controlled sites using self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
others:
|
||||
description:
|
||||
- all arguments accepted by the M(file) module also work here
|
||||
|
@ -108,19 +115,6 @@ try:
|
|||
except ImportError:
|
||||
HAS_HASHLIB=False
|
||||
|
||||
try:
|
||||
import urllib2
|
||||
HAS_URLLIB2 = True
|
||||
except ImportError:
|
||||
HAS_URLLIB2 = False
|
||||
|
||||
try:
|
||||
import urlparse
|
||||
import socket
|
||||
HAS_URLPARSE = True
|
||||
except ImportError:
|
||||
HAS_URLPARSE=False
|
||||
|
||||
# ==============================================================
|
||||
# url handling
|
||||
|
||||
|
@ -130,72 +124,6 @@ def url_filename(url):
|
|||
return 'index.html'
|
||||
return fn
|
||||
|
||||
def url_do_get(module, url, dest, use_proxy, last_mod_time, force):
|
||||
"""
|
||||
Get url and return request and info
|
||||
Credits: http://stackoverflow.com/questions/7006574/how-to-download-file-from-ftp
|
||||
"""
|
||||
|
||||
USERAGENT = 'ansible-httpget'
|
||||
info = dict(url=url, dest=dest)
|
||||
r = None
|
||||
handlers = []
|
||||
|
||||
parsed = urlparse.urlparse(url)
|
||||
|
||||
if '@' in parsed[1]:
|
||||
credentials, netloc = parsed[1].split('@', 1)
|
||||
if ':' in credentials:
|
||||
username, password = credentials.split(':', 1)
|
||||
else:
|
||||
username = credentials
|
||||
password = ''
|
||||
parsed = list(parsed)
|
||||
parsed[1] = netloc
|
||||
|
||||
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
|
||||
# this creates a password manager
|
||||
passman.add_password(None, netloc, username, password)
|
||||
# because we have put None at the start it will always
|
||||
# use this username/password combination for urls
|
||||
# for which `theurl` is a super-url
|
||||
|
||||
authhandler = urllib2.HTTPBasicAuthHandler(passman)
|
||||
# create the AuthHandler
|
||||
handlers.append(authhandler)
|
||||
|
||||
#reconstruct url without credentials
|
||||
url = urlparse.urlunparse(parsed)
|
||||
|
||||
if not use_proxy:
|
||||
proxyhandler = urllib2.ProxyHandler({})
|
||||
handlers.append(proxyhandler)
|
||||
|
||||
opener = urllib2.build_opener(*handlers)
|
||||
urllib2.install_opener(opener)
|
||||
request = urllib2.Request(url)
|
||||
request.add_header('User-agent', USERAGENT)
|
||||
|
||||
if last_mod_time and not force:
|
||||
tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
|
||||
request.add_header('If-Modified-Since', tstamp)
|
||||
else:
|
||||
request.add_header('cache-control', 'no-cache')
|
||||
|
||||
try:
|
||||
r = urllib2.urlopen(request)
|
||||
info.update(r.info())
|
||||
info['url'] = r.geturl() # The URL goes in too, because of redirects.
|
||||
info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200))
|
||||
except urllib2.HTTPError, e:
|
||||
# Must not fail_json() here so caller can handle HTTP 304 unmodified
|
||||
info.update(dict(msg=str(e), status=e.code))
|
||||
except urllib2.URLError, e:
|
||||
code = getattr(e, 'code', -1)
|
||||
module.fail_json(msg="Request failed: %s" % str(e), status_code=code)
|
||||
|
||||
return r, info
|
||||
|
||||
def url_get(module, url, dest, use_proxy, last_mod_time, force):
|
||||
"""
|
||||
Download data from the url and store in a temporary file.
|
||||
|
@ -203,7 +131,7 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force):
|
|||
Return (tempfile, info about the request)
|
||||
"""
|
||||
|
||||
req, info = url_do_get(module, url, dest, use_proxy, last_mod_time, force)
|
||||
rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time)
|
||||
|
||||
if info['status'] == 304:
|
||||
module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''))
|
||||
|
@ -215,12 +143,12 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force):
|
|||
fd, tempname = tempfile.mkstemp()
|
||||
f = os.fdopen(fd, 'wb')
|
||||
try:
|
||||
shutil.copyfileobj(req, f)
|
||||
shutil.copyfileobj(rsp, f)
|
||||
except Exception, err:
|
||||
os.remove(tempname)
|
||||
module.fail_json(msg="failed to create temporary content file: %s" % str(err))
|
||||
f.close()
|
||||
req.close()
|
||||
rsp.close()
|
||||
return tempname, info
|
||||
|
||||
def extract_filename_from_headers(headers):
|
||||
|
@ -247,21 +175,16 @@ def extract_filename_from_headers(headers):
|
|||
|
||||
def main():
|
||||
|
||||
# does this really happen on non-ancient python?
|
||||
if not HAS_URLLIB2:
|
||||
module.fail_json(msg="urllib2 is not installed")
|
||||
if not HAS_URLPARSE:
|
||||
module.fail_json(msg="urlparse is not installed")
|
||||
argument_spec = url_argument_spec()
|
||||
argument_spec.update(
|
||||
url = dict(required=True),
|
||||
dest = dict(required=True),
|
||||
sha256sum = dict(default=''),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
# not checking because of daisy chain to file module
|
||||
argument_spec = dict(
|
||||
url = dict(required=True),
|
||||
dest = dict(required=True),
|
||||
force = dict(default='no', aliases=['thirsty'], type='bool'),
|
||||
sha256sum = dict(default=''),
|
||||
use_proxy = dict(default='yes', type='bool')
|
||||
),
|
||||
argument_spec = argument_spec,
|
||||
add_file_common_args=True
|
||||
)
|
||||
|
||||
|
@ -366,4 +289,5 @@ def main():
|
|||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
main()
|
||||
|
|
42
network/uri
42
network/uri
|
@ -106,7 +106,7 @@ options:
|
|||
required: false
|
||||
status_code:
|
||||
description:
|
||||
- A valid, numeric, HTTP status code that signifies success of the request.
|
||||
- A valid, numeric, HTTP status code that signifies success of the request. Can also be comma separated list of status codes.
|
||||
required: false
|
||||
default: 200
|
||||
timeout:
|
||||
|
@ -143,23 +143,29 @@ EXAMPLES = '''
|
|||
when: 'AWESOME' not in "{{ webpage.content }}"
|
||||
|
||||
|
||||
# Create a JIRA issue.
|
||||
- action: >
|
||||
uri url=https://your.jira.example.com/rest/api/2/issue/
|
||||
method=POST user=your_username password=your_pass
|
||||
body="{{ lookup('file','issue.json') }}" force_basic_auth=yes
|
||||
status_code=201 HEADER_Content-Type="application/json"
|
||||
# Create a JIRA issue
|
||||
|
||||
- action: >
|
||||
uri url=https://your.form.based.auth.examle.com/index.php
|
||||
method=POST body="name=your_username&password=your_password&enter=Sign%20in"
|
||||
status_code=302 HEADER_Content-Type="application/x-www-form-urlencoded"
|
||||
register: login
|
||||
- uri: url=https://your.jira.example.com/rest/api/2/issue/
|
||||
method=POST user=your_username password=your_pass
|
||||
body="{{ lookup('file','issue.json') }}" force_basic_auth=yes
|
||||
status_code=201 HEADER_Content-Type="application/json"
|
||||
|
||||
# Login to a form based webpage, then use the returned cookie to
|
||||
# access the app in later tasks.
|
||||
- action: uri url=https://your.form.based.auth.example.com/dashboard.php
|
||||
method=GET return_content=yes HEADER_Cookie="{{login.set_cookie}}"
|
||||
# access the app in later tasks
|
||||
|
||||
- uri: url=https://your.form.based.auth.examle.com/index.php
|
||||
method=POST body="name=your_username&password=your_password&enter=Sign%20in"
|
||||
status_code=302 HEADER_Content-Type="application/x-www-form-urlencoded"
|
||||
register: login
|
||||
|
||||
- uri: url=https://your.form.based.auth.example.com/dashboard.php
|
||||
method=GET return_content=yes HEADER_Cookie="{{login.set_cookie}}"
|
||||
|
||||
# Queue build of a project in Jenkins:
|
||||
|
||||
- uri: url=http://{{jenkins.host}}/job/{{jenkins.job}}/build?token={{jenkins.token}}
|
||||
method=GET user={{jenkins.user}} password={{jenkins.password}} force_basic_auth=yes status_code=201
|
||||
|
||||
'''
|
||||
|
||||
HAS_HTTPLIB2 = True
|
||||
|
@ -335,7 +341,7 @@ def main():
|
|||
follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']),
|
||||
creates = dict(required=False, default=None),
|
||||
removes = dict(required=False, default=None),
|
||||
status_code = dict(required=False, default=200, type='int'),
|
||||
status_code = dict(required=False, default=[200], type='list'),
|
||||
timeout = dict(required=False, default=30, type='int'),
|
||||
),
|
||||
check_invalid_arguments=False,
|
||||
|
@ -358,7 +364,7 @@ def main():
|
|||
redirects = module.params['follow_redirects']
|
||||
creates = module.params['creates']
|
||||
removes = module.params['removes']
|
||||
status_code = int(module.params['status_code'])
|
||||
status_code = [int(x) for x in list(module.params['status_code'])]
|
||||
socket_timeout = module.params['timeout']
|
||||
|
||||
# Grab all the http headers. Need this hack since passing multi-values is currently a bit ugly. (e.g. headers='{"Content-Type":"application/json"}')
|
||||
|
@ -427,7 +433,7 @@ def main():
|
|||
uresp['json'] = js
|
||||
except:
|
||||
pass
|
||||
if resp['status'] != status_code:
|
||||
if resp['status'] not in status_code:
|
||||
module.fail_json(msg="Status code was not " + str(status_code), content=content, **uresp)
|
||||
elif return_content:
|
||||
module.exit_json(changed=changed, content=content, **uresp)
|
||||
|
|
|
@ -76,6 +76,14 @@ options:
|
|||
description:
|
||||
- (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
|
||||
required: false
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated. This should only be used
|
||||
on personally controlled sites using self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
version_added: 1.5.1
|
||||
|
||||
# informational: requirements for nodes
|
||||
requirements: [ urllib, urllib2 ]
|
||||
|
@ -96,31 +104,12 @@ EXAMPLES = '''
|
|||
tags=tag1,tag2,tag3
|
||||
'''
|
||||
|
||||
HAS_URLLIB = True
|
||||
try:
|
||||
import urllib
|
||||
except ImportError:
|
||||
HAS_URLLIB = False
|
||||
|
||||
HAS_URLLIB2 = True
|
||||
try:
|
||||
import urllib2
|
||||
except ImportError:
|
||||
HAS_URLLIB2 = False
|
||||
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
def main():
|
||||
|
||||
if not HAS_URLLIB:
|
||||
module.fail_json(msg="urllib is not installed")
|
||||
if not HAS_URLLIB2:
|
||||
module.fail_json(msg="urllib2 is not installed")
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
token=dict(required=True),
|
||||
|
@ -135,6 +124,7 @@ def main():
|
|||
project=dict(required=False),
|
||||
tags=dict(required=False),
|
||||
link=dict(required=False),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
@ -187,14 +177,16 @@ def main():
|
|||
module.exit_json(changed=False)
|
||||
|
||||
# Send the data to Flowdock
|
||||
try:
|
||||
response = urllib2.urlopen(url, urllib.urlencode(params))
|
||||
except Exception, e:
|
||||
module.fail_json(msg="unable to send msg: %s" % e)
|
||||
data = urllib.urlencode(params)
|
||||
response, info = fetch_url(module, url, data=data)
|
||||
if info['status'] != 200:
|
||||
module.fail_json(msg="unable to send msg: %s" % info['msg'])
|
||||
|
||||
module.exit_json(changed=False, msg=module.params["msg"])
|
||||
module.exit_json(changed=True, msg=module.params["msg"])
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
|
||||
main()
|
||||
|
||||
|
|
|
@ -31,6 +31,14 @@ options:
|
|||
description:
|
||||
- Icon for the service
|
||||
required: false
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated. This should only be used
|
||||
on personally controlled sites using self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
version_added: 1.5.1
|
||||
author: Jonas Pfenniger <zimbatm@zimbatm.com>
|
||||
'''
|
||||
|
||||
|
@ -41,8 +49,6 @@ EXAMPLES = '''
|
|||
message=deployed {{ target }}
|
||||
'''
|
||||
|
||||
import urllib
|
||||
|
||||
BASE_URL = 'https://grove.io/api/notice/%s/'
|
||||
|
||||
# ==============================================================
|
||||
|
@ -57,7 +63,10 @@ def do_notify_grove(module, channel_token, service, message, url=None, icon_url=
|
|||
if icon_url is not None:
|
||||
my_data['icon_url'] = icon_url
|
||||
|
||||
urllib.urlopen(my_url, urllib.urlencode(my_data))
|
||||
data = urllib.urlencode(my_data)
|
||||
response, info = fetch_url(module, my_url, data=data)
|
||||
if info['status'] != 200:
|
||||
module.fail_json(msg="failed to send notification: %s" % info['msg'])
|
||||
|
||||
# ==============================================================
|
||||
# main
|
||||
|
@ -70,6 +79,7 @@ def main():
|
|||
service = dict(type='str', default='ansible'),
|
||||
url = dict(type='str', default=None),
|
||||
icon_url = dict(type='str', default=None),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
)
|
||||
)
|
||||
|
||||
|
|
|
@ -46,6 +46,21 @@ options:
|
|||
required: false
|
||||
default: 'yes'
|
||||
choices: [ "yes", "no" ]
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated. This should only be used
|
||||
on personally controlled sites using self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices: ['yes', 'no']
|
||||
version_added: 1.5.1
|
||||
api:
|
||||
description:
|
||||
- API url if using a self-hosted hipchat server
|
||||
required: false
|
||||
default: 'https://api.hipchat.com/v1/rooms/message'
|
||||
version_added: 1.6.0
|
||||
|
||||
|
||||
# informational: requirements for nodes
|
||||
requirements: [ urllib, urllib2 ]
|
||||
|
@ -60,23 +75,10 @@ EXAMPLES = '''
|
|||
# HipChat module specific support methods.
|
||||
#
|
||||
|
||||
HAS_URLLIB = True
|
||||
try:
|
||||
import urllib
|
||||
except ImportError:
|
||||
HAS_URLLIB = False
|
||||
MSG_URI = "https://api.hipchat.com/v1/rooms/message"
|
||||
|
||||
HAS_URLLIB2 = True
|
||||
try:
|
||||
import urllib2
|
||||
except ImportError:
|
||||
HAS_URLLIB2 = False
|
||||
|
||||
MSG_URI = "https://api.hipchat.com/v1/rooms/message?"
|
||||
|
||||
|
||||
def send_msg(token, room, msg_from, msg, msg_format='text',
|
||||
color='yellow', notify=False):
|
||||
def send_msg(module, token, room, msg_from, msg, msg_format='text',
|
||||
color='yellow', notify=False, api=MSG_URI):
|
||||
'''sending message to hipchat'''
|
||||
|
||||
params = {}
|
||||
|
@ -85,15 +87,20 @@ def send_msg(token, room, msg_from, msg, msg_format='text',
|
|||
params['message'] = msg
|
||||
params['message_format'] = msg_format
|
||||
params['color'] = color
|
||||
params['api'] = api
|
||||
|
||||
if notify:
|
||||
params['notify'] = 1
|
||||
else:
|
||||
params['notify'] = 0
|
||||
|
||||
url = MSG_URI + "auth_token=%s" % (token)
|
||||
response = urllib2.urlopen(url, urllib.urlencode(params))
|
||||
return response.read()
|
||||
url = api + "?auth_token=%s" % (token)
|
||||
data = urllib.urlencode(params)
|
||||
response, info = fetch_url(module, url, data=data)
|
||||
if info['status'] == 200:
|
||||
return response.read()
|
||||
else:
|
||||
module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
|
||||
|
||||
|
||||
# ===========================================
|
||||
|
@ -102,11 +109,6 @@ def send_msg(token, room, msg_from, msg, msg_format='text',
|
|||
|
||||
def main():
|
||||
|
||||
if not HAS_URLLIB:
|
||||
module.fail_json(msg="urllib is not installed")
|
||||
if not HAS_URLLIB2:
|
||||
module.fail_json(msg="urllib2 is not installed")
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
token=dict(required=True),
|
||||
|
@ -117,6 +119,8 @@ def main():
|
|||
"purple", "gray", "random"]),
|
||||
msg_format=dict(default="text", choices=["text", "html"]),
|
||||
notify=dict(default=True, type='bool'),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
api = dict(default=MSG_URI),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
@ -128,17 +132,18 @@ def main():
|
|||
color = module.params["color"]
|
||||
msg_format = module.params["msg_format"]
|
||||
notify = module.params["notify"]
|
||||
api = module.params["api"]
|
||||
|
||||
try:
|
||||
send_msg(token, room, msg_from, msg, msg_format,
|
||||
color, notify)
|
||||
send_msg(module, token, room, msg_from, msg, msg_format, color, notify, api)
|
||||
except Exception, e:
|
||||
module.fail_json(msg="unable to sent msg: %s" % e)
|
||||
|
||||
changed = True
|
||||
module.exit_json(changed=changed, room=room, msg_from=msg_from,
|
||||
msg=msg)
|
||||
module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
|
||||
main()
|
||||
|
|
|
@ -39,7 +39,7 @@ options:
|
|||
default: 6667
|
||||
nick:
|
||||
description:
|
||||
- Nickname
|
||||
- Nickname. May be shortened, depending on server's NICKLEN setting.
|
||||
required: false
|
||||
default: ansible
|
||||
msg:
|
||||
|
@ -49,10 +49,10 @@ options:
|
|||
default: null
|
||||
color:
|
||||
description:
|
||||
- Text color for the message. Default is black.
|
||||
- Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none").
|
||||
required: false
|
||||
default: black
|
||||
choices: [ "yellow", "red", "green", "blue", "black" ]
|
||||
default: "none"
|
||||
choices: [ "none", "yellow", "red", "green", "blue", "black" ]
|
||||
channel:
|
||||
description:
|
||||
- Channel name
|
||||
|
@ -94,7 +94,7 @@ from time import sleep
|
|||
|
||||
|
||||
def send_msg(channel, msg, server='localhost', port='6667',
|
||||
nick="ansible", color='black', passwd=False, timeout=30):
|
||||
nick="ansible", color='none', passwd=False, timeout=30):
|
||||
'''send message to IRC'''
|
||||
|
||||
colornumbers = {
|
||||
|
@ -107,10 +107,11 @@ def send_msg(channel, msg, server='localhost', port='6667',
|
|||
|
||||
try:
|
||||
colornumber = colornumbers[color]
|
||||
colortext = "\x03" + colornumber
|
||||
except:
|
||||
colornumber = "01" # black
|
||||
colortext = ""
|
||||
|
||||
message = "\x03" + colornumber + msg
|
||||
message = colortext + msg
|
||||
|
||||
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
irc.connect((server, int(port)))
|
||||
|
@ -122,11 +123,15 @@ def send_msg(channel, msg, server='localhost', port='6667',
|
|||
start = time.time()
|
||||
while 1:
|
||||
motd += irc.recv(1024)
|
||||
if re.search('^:\S+ 00[1-4] %s :' % nick, motd, flags=re.M):
|
||||
# The server might send back a shorter nick than we specified (due to NICKLEN),
|
||||
# so grab that and use it from now on (assuming we find the 00[1-4] response).
|
||||
match = re.search('^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
|
||||
if match:
|
||||
nick = match.group('nick')
|
||||
break
|
||||
elif time.time() - start > timeout:
|
||||
raise Exception('Timeout waiting for IRC server welcome response')
|
||||
time.sleep(0.5)
|
||||
sleep(0.5)
|
||||
|
||||
irc.send('JOIN %s\r\n' % channel)
|
||||
join = ''
|
||||
|
@ -137,13 +142,13 @@ def send_msg(channel, msg, server='localhost', port='6667',
|
|||
break
|
||||
elif time.time() - start > timeout:
|
||||
raise Exception('Timeout waiting for IRC JOIN response')
|
||||
time.sleep(0.5)
|
||||
sleep(0.5)
|
||||
|
||||
irc.send('PRIVMSG %s :%s\r\n' % (channel, message))
|
||||
time.sleep(1)
|
||||
sleep(1)
|
||||
irc.send('PART %s\r\n' % channel)
|
||||
irc.send('QUIT\r\n')
|
||||
time.sleep(1)
|
||||
sleep(1)
|
||||
irc.close()
|
||||
|
||||
# ===========================================
|
||||
|
@ -158,8 +163,8 @@ def main():
|
|||
port=dict(default=6667),
|
||||
nick=dict(default='ansible'),
|
||||
msg=dict(required=True),
|
||||
color=dict(default="black", choices=["yellow", "red", "green",
|
||||
"blue", "black"]),
|
||||
color=dict(default="none", choices=["yellow", "red", "green",
|
||||
"blue", "black", "none"]),
|
||||
channel=dict(required=True),
|
||||
passwd=dict(),
|
||||
timeout=dict(type='int', default=30)
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2013, Jan-Piet Mens <jpmens () gmail.com>
|
||||
# (c) 2013, 2014, Jan-Piet Mens <jpmens () gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
|
@ -80,7 +80,7 @@ options:
|
|||
requirements: [ mosquitto ]
|
||||
notes:
|
||||
- This module requires a connection to an MQTT broker such as Mosquitto
|
||||
U(http://mosquitto.org) and the C(mosquitto) Python module (U(http://mosquitto.org/python)).
|
||||
U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.python.org/pypi/paho-mqtt)).
|
||||
author: Jan-Piet Mens
|
||||
'''
|
||||
|
||||
|
@ -97,34 +97,12 @@ EXAMPLES = '''
|
|||
# MQTT module support methods.
|
||||
#
|
||||
|
||||
HAS_MOSQUITTO = True
|
||||
HAS_PAHOMQTT = True
|
||||
try:
|
||||
import socket
|
||||
import mosquitto
|
||||
import paho.mqtt.publish as mqtt
|
||||
except ImportError:
|
||||
HAS_MOSQUITTO = False
|
||||
import os
|
||||
|
||||
def publish(module, topic, payload, server='localhost', port='1883', qos='0',
|
||||
client_id='', retain=False, username=None, password=None):
|
||||
'''Open connection to MQTT broker and publish the topic'''
|
||||
|
||||
mqttc = mosquitto.Mosquitto(client_id, clean_session=True)
|
||||
|
||||
if username is not None and password is not None:
|
||||
mqttc.username_pw_set(username, password)
|
||||
|
||||
rc = mqttc.connect(server, int(port), 5)
|
||||
if rc != 0:
|
||||
module.fail_json(msg="unable to connect to MQTT broker")
|
||||
|
||||
mqttc.publish(topic, payload, int(qos), retain)
|
||||
rc = mqttc.loop()
|
||||
if rc != 0:
|
||||
module.fail_json(msg="unable to send to MQTT broker")
|
||||
|
||||
mqttc.disconnect()
|
||||
|
||||
HAS_PAHOMQTT = False
|
||||
|
||||
# ===========================================
|
||||
# Main
|
||||
|
@ -132,10 +110,6 @@ def publish(module, topic, payload, server='localhost', port='1883', qos='0',
|
|||
|
||||
def main():
|
||||
|
||||
if not HAS_MOSQUITTO:
|
||||
module.fail_json(msg="mosquitto is not installed")
|
||||
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
server = dict(default = 'localhost'),
|
||||
|
@ -151,15 +125,18 @@ def main():
|
|||
supports_check_mode=True
|
||||
)
|
||||
|
||||
server = module.params["server"]
|
||||
port = module.params["port"]
|
||||
topic = module.params["topic"]
|
||||
payload = module.params["payload"]
|
||||
client_id = module.params["client_id"]
|
||||
qos = module.params["qos"]
|
||||
retain = module.params["retain"]
|
||||
username = module.params["username"]
|
||||
password = module.params["password"]
|
||||
if not HAS_PAHOMQTT:
|
||||
module.fail_json(msg="Paho MQTT is not installed")
|
||||
|
||||
server = module.params.get("server", 'localhost')
|
||||
port = module.params.get("port", 1883)
|
||||
topic = module.params.get("topic")
|
||||
payload = module.params.get("payload")
|
||||
client_id = module.params.get("client_id", '')
|
||||
qos = int(module.params.get("qos", 0))
|
||||
retain = module.params.get("retain")
|
||||
username = module.params.get("username", None)
|
||||
password = module.params.get("password", None)
|
||||
|
||||
if client_id is None:
|
||||
client_id = "%s_%s" % (socket.getfqdn(), os.getpid())
|
||||
|
@ -167,9 +144,18 @@ def main():
|
|||
if payload and payload == 'None':
|
||||
payload = None
|
||||
|
||||
auth=None
|
||||
if username is not None:
|
||||
auth = { 'username' : username, 'password' : password }
|
||||
|
||||
try:
|
||||
publish(module, topic, payload, server, port, qos, client_id, retain,
|
||||
username, password)
|
||||
rc = mqtt.single(topic, payload,
|
||||
qos=qos,
|
||||
retain=retain,
|
||||
client_id=client_id,
|
||||
hostname=server,
|
||||
port=port,
|
||||
auth=auth)
|
||||
except Exception, e:
|
||||
module.fail_json(msg="unable to publish to MQTT broker %s" % (e))
|
||||
|
||||
|
|
140
notification/nexmo
Normal file
140
notification/nexmo
Normal file
|
@ -0,0 +1,140 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2014, Matt Martz <matt@sivel.net>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
DOCUMENTATION = """
|
||||
module: nexmo
|
||||
short_description: Send a SMS via nexmo
|
||||
description:
|
||||
- Send a SMS message via nexmo
|
||||
version_added: 1.6
|
||||
author: Matt Martz
|
||||
options:
|
||||
api_key:
|
||||
description:
|
||||
- Nexmo API Key
|
||||
required: true
|
||||
api_secret:
|
||||
description:
|
||||
- Nexmo API Secret
|
||||
required: true
|
||||
src:
|
||||
description:
|
||||
- Nexmo Number to send from
|
||||
required: true
|
||||
dest:
|
||||
description:
|
||||
- Phone number(s) to send SMS message to
|
||||
required: true
|
||||
msg:
|
||||
description:
|
||||
- Message to text to send. Messages longer than 160 characters will be
|
||||
split into multiple messages
|
||||
required: true
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated. This should only be used
|
||||
on personally controlled sites using self-signed certificates.
|
||||
required: false
|
||||
default: 'yes'
|
||||
choices:
|
||||
- 'yes'
|
||||
- 'no'
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
- name: Send notification message via Nexmo
|
||||
local_action:
|
||||
module: nexmo
|
||||
api_key: 640c8a53
|
||||
api_secret: 0ce239a6
|
||||
src: 12345678901
|
||||
dest:
|
||||
- 10987654321
|
||||
- 16789012345
|
||||
msg: "{{ inventory_hostname }} completed"
|
||||
"""
|
||||
|
||||
|
||||
NEXMO_API = 'https://rest.nexmo.com/sms/json'
|
||||
|
||||
|
||||
def send_msg(module):
|
||||
failed = list()
|
||||
responses = dict()
|
||||
msg = {
|
||||
'api_key': module.params.get('api_key'),
|
||||
'api_secret': module.params.get('api_secret'),
|
||||
'from': module.params.get('src'),
|
||||
'text': module.params.get('msg')
|
||||
}
|
||||
for number in module.params.get('dest'):
|
||||
msg['to'] = number
|
||||
url = "%s?%s" % (NEXMO_API, urllib.urlencode(msg))
|
||||
|
||||
headers = dict(Accept='application/json')
|
||||
response, info = fetch_url(module, url, headers=headers)
|
||||
if info['status'] != 200:
|
||||
failed.append(number)
|
||||
responses[number] = dict(failed=True)
|
||||
|
||||
try:
|
||||
responses[number] = json.load(response)
|
||||
except:
|
||||
failed.append(number)
|
||||
responses[number] = dict(failed=True)
|
||||
else:
|
||||
for message in responses[number]['messages']:
|
||||
if int(message['status']) != 0:
|
||||
failed.append(number)
|
||||
responses[number] = dict(failed=True, **responses[number])
|
||||
|
||||
if failed:
|
||||
msg = 'One or messages failed to send'
|
||||
else:
|
||||
msg = ''
|
||||
|
||||
module.exit_json(failed=bool(failed), msg=msg, changed=False,
|
||||
responses=responses)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = url_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
api_key=dict(required=True, no_log=True),
|
||||
api_secret=dict(required=True, no_log=True),
|
||||
src=dict(required=True, type='int'),
|
||||
dest=dict(required=True, type='list'),
|
||||
msg=dict(required=True),
|
||||
),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec
|
||||
)
|
||||
|
||||
send_msg(module)
|
||||
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
|
||||
main()
|
|
@ -44,8 +44,6 @@ EXAMPLES = '''
|
|||
- local_action: osx_say msg="{{inventory_hostname}} is all done" voice=Zarvox
|
||||
'''
|
||||
|
||||
import subprocess
|
||||
|
||||
DEFAULT_VOICE='Trinoids'
|
||||
|
||||
def say(module, msg, voice):
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue