commit 21f56aef77

    merge devel and fix conflicts

114 changed files with 4832 additions and 1246 deletions
@@ -14,3 +14,4 @@ script:
  - python2.4 -m compileall -fq cloud/amazon/_ec2_ami_search.py cloud/amazon/ec2_facts.py
  - python2.6 -m compileall -fq .
  - python2.7 -m compileall -fq .
  #- ./test-docs.sh core

@@ -22,6 +22,10 @@ I'd also read the community page above, but in particular, make sure you copy [t

Also please make sure you are testing on the latest released version of Ansible or the development branch.

If you'd like to contribute code to an existing module
======================================================
Each module in Core is maintained by the owner of that module; each module's owner is indicated in the documentation section of the module itself. Any pull request for a module that is given a +1 by the owner in the comments will be merged by the Ansible team.

Thanks!

@@ -83,7 +83,6 @@ EXAMPLES = '''

import csv
import json
import urllib2
import urlparse

SUPPORTED_DISTROS = ['ubuntu']

@@ -102,11 +101,12 @@ AWS_REGIONS = ['ap-northeast-1',

def get_url(module, url):
    """ Get url and return response """
    try:
        r = urllib2.urlopen(url)
    except (urllib2.HTTPError, urllib2.URLError), e:
        code = getattr(e, 'code', -1)
        module.fail_json(msg="Request failed: %s" % str(e), status_code=code)

    r, info = fetch_url(module, url)
    if info['status'] != 200:
        # Backwards compat
        info['status_code'] = info['status']
        module.fail_json(**info)
    return r


@@ -182,7 +182,7 @@ def main():
            choices=['i386', 'amd64']),
        region=dict(required=False, default='us-east-1', choices=AWS_REGIONS),
        virt=dict(required=False, default='paravirtual',
                  choices=['paravirtual', 'hvm'])
                  choices=['paravirtual', 'hvm']),
    )
    module = AnsibleModule(argument_spec=arg_spec)
    distro = module.params['distro']

@@ -196,6 +196,7 @@ def main():

# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

if __name__ == '__main__':
    main()

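The hunk above swaps a bare urllib2 call for Ansible's fetch_url helper, which routes requests through the module's proxy, timeout, and TLS settings and reports errors through an info dict. A minimal sketch of the calling convention (the fetch_json helper name is illustrative, not part of the module):

    import json

    # Assumes this runs inside an Ansible module, where module_utils
    # provides AnsibleModule and fetch_url.
    from ansible.module_utils.basic import *   # noqa: provides AnsibleModule
    from ansible.module_utils.urls import *    # noqa: provides fetch_url

    def fetch_json(module, url):
        response, info = fetch_url(module, url)
        # info['status'] carries the HTTP status; anything but 200 is a failure
        if info['status'] != 200:
            info['status_code'] = info['status']   # backwards-compatible key
            module.fail_json(**info)
        return json.load(response)
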
@@ -26,34 +26,28 @@ options:
    description:
      - name of the cloudformation stack
    required: true
    default: null
    aliases: []
  disable_rollback:
    description:
      - If a stack fails to form, rollback will remove the stack
    required: false
    default: "false"
    choices: [ "true", "false" ]
    aliases: []
  template_parameters:
    description:
      - a list of hashes of all the template variables for the stack
    required: false
    default: {}
    aliases: []
  state:
    description:
      - If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated.
        If state is "absent", stack will be removed.
    required: true
    default: null
    aliases: []
  template:
    description:
      - The local path of the cloudformation template. This parameter is mutually exclusive with 'template_url'. Either one of them is required if "state" parameter is "present".
        Must give full path to the file, relative to the working directory. If using roles this may look like "roles/cloudformation/files/cloudformation-example.json"
    required: false
    default: null
    aliases: []
  notification_arns:
    description:
      - The Simple Notification Service (SNS) topic ARNs to publish stack related events.

@@ -65,21 +59,18 @@ options:
      - the path of the cloudformation stack policy
    required: false
    default: null
    aliases: []
    version_added: "x.x"
    version_added: "1.9"
  tags:
    description:
      - Dictionary of tags to associate with stack and its resources during stack creation. Cannot be updated later.
        Requires at least Boto version 2.6.0.
    required: false
    default: null
    aliases: []
    version_added: "1.4"
  region:
    description:
      - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
    required: true
    default: null
    aliases: ['aws_region', 'ec2_region']
    version_added: "1.5"
  template_url:

@@ -88,7 +79,8 @@ options:
    required: false
    version_added: "2.0"
  template_format:
    description: For local templates, allows specification of json or yaml format
    description:
      - For local templates, allows specification of json or yaml format
    default: json
    choices: [ json, yaml ]
    required: false

@@ -115,6 +107,22 @@ EXAMPLES = '''
    tags:
      Stack: "ansible-cloudformation"

# Basic role example
- name: launch ansible cloudformation example
  cloudformation:
    stack_name: "ansible-cloudformation"
    state: "present"
    region: "us-east-1"
    disable_rollback: true
    template: "roles/cloudformation/files/cloudformation-example.json"
    template_parameters:
      KeyName: "jmartin"
      DiskType: "ephemeral"
      InstanceType: "m1.small"
      ClusterSize: 3
    tags:
      Stack: "ansible-cloudformation"

# Removal example
- name: tear down old deployment
  cloudformation:

@@ -144,7 +144,7 @@ options:
  instance_tags:
    version_added: "1.0"
    description:
      - a hash/dictionary of tags to add to the new instance; '{"key":"value"}' and '{"key":"value","key":"value"}'
      - a hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}'
    required: false
    default: null
    aliases: []

@@ -229,19 +229,26 @@ options:
  exact_count:
    version_added: "1.5"
    description:
      - An integer value which indicates how many instances that match the 'count_tag' parameter should be running. Instances are either created or terminated based on this value.
      - An integer value which indicates how many instances that match the 'count_tag' parameter should be running. Instances are either created or terminated based on this value.
    required: false
    default: null
    aliases: []
  count_tag:
    version_added: "1.5"
    description:
      - Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running. This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver".
      - Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running. This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver".
    required: false
    default: null
    aliases: []
  network_interfaces:
    version_added: "2.0"
    description:
      - A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces, none of the assign_public_ip, private_ip, vpc_subnet_id, group, or group_id parameters may be used. (Those parameters are for creating a new network interface at launch.)
    required: false
    default: null
    aliases: ['network_interface']

author:
author:
  - "Tim Gerla (@tgerla)"
  - "Lester Wade (@lwade)"
  - "Seth Vidal"
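The exact_count/count_tag pair documented above is enforced by counting instances that match a tag filter and then launching or terminating the difference. A rough sketch of that reconciliation logic with boto (the reconcile helper is illustrative, not the module's API):

    import boto.ec2

    # Sketch only: reconcile running instances against exact_count using
    # a count_tag filter, as the option docs above describe.
    def reconcile(ec2, count_tag, exact_count):
        filters = {'instance-state-name': 'running'}
        for key, value in count_tag.items():
            filters['tag:' + key] = value
        reservations = ec2.get_all_instances(filters=filters)
        running = [i for r in reservations for i in r.instances]
        delta = exact_count - len(running)
        if delta > 0:
            pass  # launch `delta` more instances here
        elif delta < 0:
            ec2.terminate_instances([i.id for i in running[:(-delta)]])
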
@@ -271,7 +278,7 @@ EXAMPLES = '''
    wait: yes
    wait_timeout: 500
    count: 5
    instance_tags:
    instance_tags:
      db: postgres
      monitoring: yes
    vpc_subnet_id: subnet-29e63245

@@ -305,7 +312,7 @@ EXAMPLES = '''
    wait: yes
    wait_timeout: 500
    count: 5
    instance_tags:
    instance_tags:
      db: postgres
      monitoring: yes
    vpc_subnet_id: subnet-29e63245

@@ -352,6 +359,19 @@ EXAMPLES = '''
    vpc_subnet_id: subnet-29e63245
    assign_public_ip: yes

# Examples using pre-existing network interfaces
- ec2:
    key_name: mykey
    instance_type: t2.small
    image: ami-f005ba11
    network_interface: eni-deadbeef

- ec2:
    key_name: mykey
    instance_type: t2.small
    image: ami-f005ba11
    network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e']

# Launch instances, runs some tasks
# and then terminate them

@@ -366,7 +386,7 @@ EXAMPLES = '''
    region: us-east-1
  tasks:
    - name: Launch instance
      ec2:
      ec2:
        key_name: "{{ keypair }}"
        group: "{{ security_group }}"
        instance_type: "{{ instance_type }}"

@@ -446,6 +466,15 @@ EXAMPLES = '''
    vpc_subnet_id: subnet-29e63245
    assign_public_ip: yes

#
# Start stopped instances specified by tag
#
- local_action:
    module: ec2
    instance_tags:
      Name: ExtraPower
    state: running

#
# Enforce that 5 instances with a tag "foo" are running
# (Highly recommended!)

@@ -474,11 +503,11 @@ EXAMPLES = '''
    image: ami-40603AD1
    wait: yes
    group: webserver
    instance_tags:
    instance_tags:
      Name: database
      dbtype: postgres
    exact_count: 5
    count_tag:
    count_tag:
      Name: database
      dbtype: postgres
    vpc_subnet_id: subnet-29e63245

@@ -531,8 +560,8 @@ def find_running_instances_by_count_tag(module, ec2, count_tag, zone=None):
    for res in reservations:
        if hasattr(res, 'instances'):
            for inst in res.instances:
                instances.append(inst)

            instances.append(inst)

    return reservations, instances


@@ -543,7 +572,7 @@ def _set_none_to_blank(dictionary):
            result[k] = _set_none_to_blank(result[k])
        elif not result[k]:
            result[k] = ""
    return result
    return result


def get_reservations(module, ec2, tags=None, state=None, zone=None):

@@ -682,7 +711,7 @@ def create_block_device(module, ec2, volume):
    # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
    MAX_IOPS_TO_SIZE_RATIO = 30
    if 'snapshot' not in volume and 'ephemeral' not in volume:
        if 'volume_size' not in volume:
        if 'volume_size' not in volume:
            module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume')
    if 'snapshot' in volume:
        if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume:

@@ -692,8 +721,10 @@ def create_block_device(module, ec2, volume):
        size = volume.get('volume_size', snapshot.volume_size)
        if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
            module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
        if 'encrypted' in volume:
            module.fail_json(msg = 'You can not set encryption when creating a volume from a snapshot')
    if 'ephemeral' in volume:
        if 'snapshot' in volume:
        if 'snapshot' in volume:
            module.fail_json(msg = 'Cannot set both ephemeral and snapshot')
    return BlockDeviceType(snapshot_id=volume.get('snapshot'),
                           ephemeral_name=volume.get('ephemeral'),

@@ -701,8 +732,7 @@ def create_block_device(module, ec2, volume):
                           volume_type=volume.get('device_type'),
                           delete_on_termination=volume.get('delete_on_termination', False),
                           iops=volume.get('iops'),
                           encrypted=volume.get('encrypted', False))

                           encrypted=volume.get('encrypted', None))
def boto_supports_param_in_spot_request(ec2, param):
    """
    Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
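create_block_device feeds boto's block-device classes; the change above moves the encrypted default from False to None so that snapshot-backed volumes can inherit the snapshot's encryption instead of forcing it off. A hedged sketch of how such a mapping is assembled with boto (device name and values illustrative):

    from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping

    # Illustrative only: build a mapping for a single gp2 root volume.
    bdm = BlockDeviceMapping()
    bdm['/dev/sda1'] = BlockDeviceType(volume_type='gp2',
                                       size=10,
                                       delete_on_termination=True,
                                       encrypted=None)  # None = inherit/default
    # bdm is then passed as block_device_map to ec2.run_instances(...)
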
@@ -759,18 +789,18 @@ def enforce_count(module, ec2, vpc):
        for inst in instance_dict_array:
            inst['state'] = "terminated"
            terminated_list.append(inst)
        instance_dict_array = terminated_list

    # ensure all instances are dictionaries
        instance_dict_array = terminated_list

    # ensure all instances are dictionaries
    all_instances = []
    for inst in instances:
        if type(inst) is not dict:
            inst = get_instance_info(inst)
        all_instances.append(inst)
        all_instances.append(inst)

    return (all_instances, instance_dict_array, changed_instance_ids, changed)



def create_instances(module, ec2, vpc, override_count=None):
    """
    Creates new instances

@@ -816,6 +846,7 @@ def create_instances(module, ec2, vpc, override_count=None):
    count_tag = module.params.get('count_tag')
    source_dest_check = module.boolean(module.params.get('source_dest_check'))
    termination_protection = module.boolean(module.params.get('termination_protection'))
    network_interfaces = module.params.get('network_interfaces')

    # group_id and group_name are exclusive of each other
    if group_id and group_name:

@@ -823,7 +854,10 @@ def create_instances(module, ec2, vpc, override_count=None):

    vpc_id = None
    if vpc_subnet_id:
        vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
        if not vpc:
            module.fail_json(msg="region must be specified")
        else:
            vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
    else:
        vpc_id = None

@@ -878,7 +912,7 @@ def create_instances(module, ec2, vpc, override_count=None):

        if ebs_optimized:
            params['ebs_optimized'] = ebs_optimized

        # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
        if not spot_price:
            params['tenancy'] = tenancy

@@ -911,21 +945,33 @@ def create_instances(module, ec2, vpc, override_count=None):
                    groups=group_id,
                    associate_public_ip_address=assign_public_ip)
                interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
                params['network_interfaces'] = interfaces
                params['network_interfaces'] = interfaces
            else:
                params['subnet_id'] = vpc_subnet_id
                if vpc_subnet_id:
                    params['security_group_ids'] = group_id
                if network_interfaces:
                    if isinstance(network_interfaces, basestring):
                        network_interfaces = [network_interfaces]
                    interfaces = []
                    for i, network_interface_id in enumerate(network_interfaces):
                        interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                            network_interface_id=network_interface_id,
                            device_index=i)
                        interfaces.append(interface)
                    params['network_interfaces'] = \
                        boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
                else:
                    params['security_groups'] = group_name
                    params['subnet_id'] = vpc_subnet_id
                    if vpc_subnet_id:
                        params['security_group_ids'] = group_id
                    else:
                        params['security_groups'] = group_name

        if volumes:
            bdm = BlockDeviceMapping()
            for volume in volumes:
            for volume in volumes:
                if 'device_name' not in volume:
                    module.fail_json(msg = 'Device name must be set for volume')
                # Minimum volume size is 1GB. We'll use volume size explicitly set to 0
                # to be a signal not to create this volume
                # to be a signal not to create this volume
                if 'volume_size' not in volume or int(volume['volume_size']) > 0:
                    bdm[volume['device_name']] = create_block_device(module, ec2, volume)

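The new branch above turns each pre-existing ENI id into a NetworkInterfaceSpecification with an increasing device_index and wraps them in a NetworkInterfaceCollection, whose constructor takes the specifications as positional arguments (hence the *interfaces unpacking). The same pattern standalone (ENI ids illustrative):

    import boto.ec2.networkinterface as ni

    # Illustrative ENI ids; each gets a device index in list order.
    eni_ids = ['eni-deadbeef', 'eni-5ca1ab1e']
    specs = [ni.NetworkInterfaceSpecification(network_interface_id=eni_id,
                                              device_index=i)
             for i, eni_id in enumerate(eni_ids)]
    collection = ni.NetworkInterfaceCollection(*specs)
    # collection is then passed as run_instances(..., network_interfaces=collection)
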
@@ -1015,7 +1061,7 @@ def create_instances(module, ec2, vpc, override_count=None):
            num_running = 0
            wait_timeout = time.time() + wait_timeout
            while wait_timeout > time.time() and num_running < len(instids):
                try:
                try:
                    res_list = ec2.get_all_instances(instids)
                except boto.exception.BotoServerError, e:
                    if e.error_code == 'InvalidInstanceID.NotFound':

@@ -1028,7 +1074,7 @@ def create_instances(module, ec2, vpc, override_count=None):
                for res in res_list:
                    num_running += len([ i for i in res.instances if i.state=='running' ])
                if len(res_list) <= 0:
                    # got a bad response of some sort, possibly due to
                    # got a bad response of some sort, possibly due to
                    # stale/cached data. Wait a second and then try again
                    time.sleep(1)
                    continue

@@ -1140,12 +1186,12 @@ def terminate_instances(module, ec2, instance_ids):
            filters={'instance-state-name':'terminated'}):
        for inst in res.instances:
            instance_dict_array.append(get_instance_info(inst))

    return (changed, instance_dict_array, terminated_instance_ids)


def startstop_instances(module, ec2, instance_ids, state):
def startstop_instances(module, ec2, instance_ids, state, instance_tags):
    """
    Starts or stops a list of existing instances

@@ -1153,6 +1199,8 @@ def startstop_instances(module, ec2, instance_ids, state):
    ec2: authenticated ec2 connection object
    instance_ids: The list of instances to start in the form of
        [ {id: <inst-id>}, ..]
    instance_tags: A dict of tag keys and values in the form of
        {key: value, ... }
    state: Intended state ("running" or "stopped")

    Returns a dictionary of instance information

@@ -1161,19 +1209,33 @@ def startstop_instances(module, ec2, instance_ids, state):
    If the instance was not able to change state,
    "changed" will be set to False.

    Note that if instance_ids and instance_tags are both non-empty,
    this method will process the intersection of the two
    """

    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    changed = False
    instance_dict_array = []

    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
        module.fail_json(msg='instance_ids should be a list of instances, aborting')
        # Fail unless the user defined instance tags
        if not instance_tags:
            module.fail_json(msg='instance_ids should be a list of instances, aborting')

    # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
    # An empty filter does no filtering, so it's safe to pass it to the
    # get_all_instances method even if the user did not specify instance_tags
    filters = {}
    if instance_tags:
        for key, value in instance_tags.items():
            filters["tag:" + key] = value

    # Check that our instances are not in the state we want to take

    # Check (and eventually change) instances attributes and instances state
    running_instances_array = []
    for res in ec2.get_all_instances(instance_ids):
    for res in ec2.get_all_instances(instance_ids, filters=filters):
        for inst in res.instances:

            # Check "source_dest_check" attribute
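As the new comment block explains, EC2 tag filters are ordinary filters whose keys are prefixed with 'tag:', and an empty dict filters nothing, so it can be passed unconditionally. A small illustration of the resulting call (tag values illustrative; ec2 is the boto connection from the surrounding code):

    # Illustrative: select instances tagged Name=ExtraPower.
    instance_tags = {'Name': 'ExtraPower'}
    filters = dict(('tag:' + k, v) for k, v in instance_tags.items())
    # filters == {'tag:Name': 'ExtraPower'}; an empty dict would match everything
    reservations = ec2.get_all_instances(None, filters=filters)
    matching = [inst for res in reservations for inst in res.instances]
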
@@ -1225,7 +1287,7 @@ def main():
    argument_spec.update(dict(
            key_name = dict(aliases = ['keypair']),
            id = dict(),
            group = dict(type='list'),
            group = dict(type='list', aliases=['groups']),
            group_id = dict(type='list'),
            zone = dict(aliases=['aws_zone', 'ec2_zone']),
            instance_type = dict(aliases=['type']),

@@ -1255,6 +1317,7 @@ def main():
            volumes = dict(type='list'),
            ebs_optimized = dict(type='bool', default=False),
            tenancy = dict(default='default'),
            network_interfaces = dict(type='list', aliases=['network_interface'])
        )
    )

@@ -1263,7 +1326,12 @@ def main():
        mutually_exclusive = [
            ['exact_count', 'count'],
            ['exact_count', 'state'],
            ['exact_count', 'instance_ids']
            ['exact_count', 'instance_ids'],
            ['network_interfaces', 'assign_public_ip'],
            ['network_interfaces', 'group'],
            ['network_interfaces', 'group_id'],
            ['network_interfaces', 'private_ip'],
            ['network_interfaces', 'vpc_subnet_id'],
        ],
    )
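AnsibleModule enforces each mutually_exclusive pair at parse time, so the module fails fast when a caller combines a pre-existing interface with any parameter that implies creating a new one. A compact sketch of the mechanism (argument spec abbreviated for illustration):

    # Sketch: AnsibleModule rejects any task that sets both members of a pair.
    module = AnsibleModule(
        argument_spec=dict(
            network_interfaces=dict(type='list'),
            assign_public_ip=dict(type='bool'),
        ),
        mutually_exclusive=[['network_interfaces', 'assign_public_ip']],
    )
    # Setting both parameters in a play now fails with a
    # "mutually exclusive" error before any AWS call is made.
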
@@ -1280,25 +1348,26 @@ def main():
        except boto.exception.NoAuthHandlerFound, e:
            module.fail_json(msg = str(e))
    else:
        module.fail_json(msg="region must be specified")
        vpc = None

    tagged_instances = []
    tagged_instances = []

    state = module.params.get('state')
    state = module.params['state']

    if state == 'absent':
        instance_ids = module.params.get('instance_ids')
        if not isinstance(instance_ids, list):
            module.fail_json(msg='termination_list needs to be a list of instances to terminate')
        instance_ids = module.params['instance_ids']
        if not instance_ids:
            module.fail_json(msg='instance_ids list is required for absent state')

        (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)

    elif state in ('running', 'stopped'):
        instance_ids = module.params.get('instance_ids')
        if not isinstance(instance_ids, list):
            module.fail_json(msg='running list needs to be a list of instances to run: %s' % instance_ids)
        instance_tags = module.params.get('instance_tags')
        if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
            module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)

        (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state)
        (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)

    elif state == 'present':
        # Changed is always set to true when provisioning new instances

@@ -18,7 +18,7 @@
DOCUMENTATION = '''
---
module: ec2_ami_find
version_added: 2.0
version_added: '2.0'
short_description: Searches for AMIs to obtain the AMI ID and other information
description:
  - Returns list of matching AMIs with AMI ID, along with other useful information

@@ -43,7 +43,7 @@ options:
  launch_config_name:
    description:
      - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
    required: false
    required: true
  min_size:
    description:
      - Minimum number of instances in group

@@ -67,7 +67,7 @@ options:
      - Number of instances you'd like to replace at a time. Used with replace_all_instances.
    required: false
    version_added: "1.8"
    default: 1
    default: 1
  replace_instances:
    description:
      - List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration.

@@ -109,6 +109,12 @@ options:
    default: EC2
    version_added: "1.7"
    choices: ['EC2', 'ELB']
  default_cooldown:
    description:
      - The number of seconds after a scaling activity completes before another can begin.
    required: false
    default: 300 seconds
    version_added: "2.0"
  wait_timeout:
    description:
      - how long to wait for instances to become viable when replaced. Used in conjunction with instance_ids option.

@@ -120,6 +126,14 @@ options:
    version_added: "1.9"
    default: yes
    required: False
  termination_policies:
    description:
      - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
      - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the current termination policies are maintained.
    required: false
    default: Default
    choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
    version_added: "2.0"
extends_documentation_fragment: aws
"""

@@ -374,6 +388,7 @@ def create_autoscaling_group(connection, module):
    set_tags = module.params.get('tags')
    health_check_period = module.params.get('health_check_period')
    health_check_type = module.params.get('health_check_type')
    default_cooldown = module.params.get('default_cooldown')
    wait_for_instances = module.params.get('wait_for_instances')
    as_groups = connection.get_all_groups(names=[group_name])
    wait_timeout = module.params.get('wait_timeout')

@@ -413,7 +428,9 @@ def create_autoscaling_group(connection, module):
                 connection=connection,
                 tags=asg_tags,
                 health_check_period=health_check_period,
                 health_check_type=health_check_type)
                 health_check_type=health_check_type,
                 default_cooldown=default_cooldown,
                 termination_policies=termination_policies)

        try:
            connection.create_auto_scaling_group(ag)

@@ -774,7 +791,9 @@ def main():
            tags=dict(type='list', default=[]),
            health_check_period=dict(type='int', default=300),
            health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
            wait_for_instances=dict(type='bool', default=True)
            default_cooldown=dict(type='int', default=300),
            wait_for_instances=dict(type='bool', default=True),
            termination_policies=dict(type='list', default=None)
        ),
    )

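The new default_cooldown and termination_policies options are threaded straight through to boto's AutoScalingGroup constructor, as the create_autoscaling_group hunk shows. A hedged standalone sketch of that call (group names and sizes illustrative):

    from boto.ec2.autoscale import AutoScalingGroup

    # Illustrative values; mirrors how the module forwards the new options.
    ag = AutoScalingGroup(
        name='example-asg',
        launch_config='example-lc',
        availability_zones=['us-east-1a'],
        min_size=1,
        max_size=5,
        default_cooldown=300,                      # seconds between scaling activities
        termination_policies=['OldestInstance'],   # who gets removed first
    )
    connection.create_auto_scaling_group(ag)       # connection: boto autoscale connection
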
@@ -1,16 +1,33 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: ec2_eip
short_description: associate an EC2 elastic IP with an instance.
description:
  - This module associates AWS EC2 elastic IP addresses with instances
version_added: 1.4
version_added: "1.4"
options:
  instance_id:
  device_id:
    description:
      - The EC2 instance id
      - The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id.
    required: false
    aliases: [ instance_id ]
    version_added: "2.0"
  public_ip:
    description:
      - The elastic IP address to associate with the instance.

@@ -37,14 +54,19 @@ options:
    version_added: "1.4"
  reuse_existing_ip_allowed:
    description:
      - Reuse an EIP that is not associated to an instance (when available),'''
''' instead of allocating a new one.
      - Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one.
    required: false
    default: false
    version_added: "1.6"

  release_on_disassociation:
    description:
      - whether or not to automatically release the EIP when it is disassociated
    required: false
    default: false
    version_added: "2.0"
extends_documentation_fragment: aws
author: "Lorin Hochstein (@lorin) <lorin@nimbisservices.com>"
author: "Rick Mendes (@rickmendes) <rmendes@illumina.com>"
notes:
  - This module will return C(public_ip) on success, which will contain the
    public IP address associated with the instance.

@@ -52,35 +74,36 @@ notes:
    the cloud instance is reachable via the new address. Use wait_for and
    pause to delay further playbook execution until the instance is reachable,
    if necessary.
  - This module returns multiple changed statuses on disassociation or release.
    It returns an overall status based on any changes occurring. It also returns
    individual changed statuses for disassociation and release.
'''

EXAMPLES = '''
- name: associate an elastic IP with an instance
  ec2_eip: instance_id=i-1212f003 ip=93.184.216.119
  ec2_eip: device_id=i-1212f003 ip=93.184.216.119
- name: associate an elastic IP with a device
  ec2_eip: device_id=eni-c8ad70f3 ip=93.184.216.119
- name: disassociate an elastic IP from an instance
  ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 state=absent
  ec2_eip: device_id=i-1212f003 ip=93.184.216.119 state=absent
- name: disassociate an elastic IP with a device
  ec2_eip: device_id=eni-c8ad70f3 ip=93.184.216.119 state=absent
- name: allocate a new elastic IP and associate it with an instance
  ec2_eip: instance_id=i-1212f003
  ec2_eip: device_id=i-1212f003
- name: allocate a new elastic IP without associating it to anything
  action: ec2_eip
  register: eip
- name: output the IP
  debug: msg="Allocated IP is {{ eip.public_ip }}"

- name: another way of allocating an elastic IP without associating it to anything
  ec2_eip: state='present'

- name: provision new instances with ec2
  ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes'''
''' group=webserver count=3
  register: ec2
- name: associate new elastic IPs with each of the instances
  ec2_eip: "instance_id={{ item }}"
  ec2_eip: "device_id={{ item }}"
  with_items: ec2.instance_ids

- name: allocate a new elastic IP inside a VPC in us-west-2
  ec2_eip: region=us-west-2 in_vpc=yes
  register: eip

@@ -98,27 +121,27 @@ except ImportError:
class EIPException(Exception):
    pass


def associate_ip_and_instance(ec2, address, instance_id, check_mode):
    if address_is_associated_with_instance(ec2, address, instance_id):
def associate_ip_and_device(ec2, address, device_id, check_mode, isinstance=True):
    if address_is_associated_with_device(ec2, address, device_id, isinstance):
        return {'changed': False}

    # If we're in check mode, nothing else to do
    if not check_mode:
        if address.domain == 'vpc':
            res = ec2.associate_address(instance_id,
                                        allocation_id=address.allocation_id)
        if isinstance:
            if address.domain == "vpc":
                res = ec2.associate_address(device_id, allocation_id=address.allocation_id)
            else:
                res = ec2.associate_address(device_id, public_ip=address.public_ip)
        else:
            res = ec2.associate_address(instance_id,
                                        public_ip=address.public_ip)
            res = ec2.associate_address(network_interface_id=device_id, allocation_id=address.allocation_id)
        if not res:
            raise EIPException('association failed')

    return {'changed': True}


def disassociate_ip_and_instance(ec2, address, instance_id, check_mode):
    if not address_is_associated_with_instance(ec2, address, instance_id):
def disassociate_ip_and_device(ec2, address, device_id, check_mode, isinstance=True):
    if not address_is_associated_with_device(ec2, address, device_id, isinstance):
        return {'changed': False}

    # If we're in check mode, nothing else to do

@@ -143,24 +166,33 @@ def _find_address_by_ip(ec2, public_ip):
        raise


def _find_address_by_instance_id(ec2, instance_id):
    addresses = ec2.get_all_addresses(None, {'instance-id': instance_id})
def _find_address_by_device_id(ec2, device_id, isinstance=True):
    if isinstance:
        addresses = ec2.get_all_addresses(None, {'instance-id': device_id})
    else:
        addresses = ec2.get_all_addresses(None, {'network-interface-id': device_id})
    if addresses:
        return addresses[0]


def find_address(ec2, public_ip, instance_id):
def find_address(ec2, public_ip, device_id, isinstance=True):
    """ Find an existing Elastic IP address """
    if public_ip:
        return _find_address_by_ip(ec2, public_ip)
    elif instance_id:
        return _find_address_by_instance_id(ec2, instance_id)
    elif device_id and isinstance:
        return _find_address_by_device_id(ec2, device_id)
    elif device_id:
        return _find_address_by_device_id(ec2, device_id, isinstance=False)


def address_is_associated_with_instance(ec2, address, instance_id):
    """ Check if the elastic IP is currently associated with the instance """
def address_is_associated_with_device(ec2, address, device_id, isinstance=True):
    """ Check if the elastic IP is currently associated with the device """
    address = ec2.get_all_addresses(address.public_ip)
    if address:
        return address and address.instance_id == instance_id
        if isinstance:
            return address and address[0].instance_id == device_id
        else:
            return address and address[0].network_interface_id == device_id
    return False

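The refactor above generalizes every instance_id helper to a device_id that may name either an instance or an ENI, selected by the boolean isinstance flag (which shadows the builtin). The caller derives that flag from the id prefix; a compact sketch of the dispatch (ids illustrative):

    def classify_device(device_id):
        # EC2 instance ids start with 'i-', ENI ids with 'eni-'.
        return device_id.startswith('i-')

    # Illustrative: route to the right association call based on the prefix.
    for device_id in ('i-1212f003', 'eni-c8ad70f3'):
        is_instance = classify_device(device_id)
        # find_address / associate_ip_and_device then receive isinstance=is_instance
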
@@ -171,7 +203,7 @@ def allocate_address(ec2, domain, reuse_existing_ip_allowed):
        all_addresses = ec2.get_all_addresses(filters=domain_filter)

        unassociated_addresses = [a for a in all_addresses
                                  if not a.instance_id]
                                  if not a.device_id]
        if unassociated_addresses:
            return unassociated_addresses[0]

@@ -189,21 +221,33 @@ def release_address(ec2, address, check_mode):
    return {'changed': True}


def find_instance(ec2, instance_id):
def find_device(ec2, device_id, isinstance=True):
    """ Attempt to find the EC2 instance and return it """

    reservations = ec2.get_all_reservations(instance_ids=[instance_id])
    if isinstance:
        try:
            reservations = ec2.get_all_reservations(instance_ids=[device_id])
        except boto.exception.EC2ResponseError, e:
            module.fail_json(msg=str(e))

    if len(reservations) == 1:
        instances = reservations[0].instances
        if len(instances) == 1:
            return instances[0]
        if len(reservations) == 1:
            instances = reservations[0].instances
            if len(instances) == 1:
                return instances[0]
    else:
        try:
            interfaces = ec2.get_all_network_interfaces(network_interface_ids=[device_id])
        except boto.exception.EC2ResponseError, e:
            module.fail_json(msg=str(e))

    raise EIPException("could not find instance" + instance_id)
        if len(interfaces) == 1:
            return interfaces[0]

    raise EIPException("could not find instance" + device_id)


def ensure_present(ec2, domain, address, instance_id,
                   reuse_existing_ip_allowed, check_mode):
def ensure_present(ec2, domain, address, device_id,
                   reuse_existing_ip_allowed, check_mode, isinstance=True):
    changed = False

    # Return the EIP object since we've been given a public IP

@@ -214,28 +258,39 @@ def ensure_present(ec2, domain, address, instance_id,
        address = allocate_address(ec2, domain, reuse_existing_ip_allowed)
        changed = True

    if instance_id:
    if device_id:
        # Allocate an IP for instance since no public_ip was provided
        instance = find_instance(ec2, instance_id)
        if isinstance:
            instance = find_device(ec2, device_id)
            # Associate address object (provided or allocated) with instance
            assoc_result = associate_ip_and_device(ec2, address, device_id,
                                                   check_mode)
        else:
            instance = find_device(ec2, device_id, isinstance=False)
            # Associate address object (provided or allocated) with instance
            assoc_result = associate_ip_and_device(ec2, address, device_id,
                                                   check_mode, isinstance=False)

        if instance.vpc_id:
            domain = 'vpc'

        # Associate address object (provided or allocated) with instance
        assoc_result = associate_ip_and_instance(ec2, address, instance_id,
                                                 check_mode)
        changed = changed or assoc_result['changed']

    return {'changed': changed, 'public_ip': address.public_ip}


def ensure_absent(ec2, domain, address, instance_id, check_mode):
def ensure_absent(ec2, domain, address, device_id, check_mode, isinstance=True):
    if not address:
        return {'changed': False}

    # disassociating address from instance
    if instance_id:
        return disassociate_ip_and_instance(ec2, address, instance_id,
                                            check_mode)
    if device_id:
        if isinstance:
            return disassociate_ip_and_device(ec2, address, device_id,
                                              check_mode)
        else:
            return disassociate_ip_and_device(ec2, address, device_id,
                                              check_mode, isinstance=False)
    # releasing address
    else:
        return release_address(ec2, address, check_mode)

@@ -244,13 +299,14 @@ def ensure_absent(ec2, domain, address, instance_id, check_mode):
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        instance_id=dict(required=False),
        device_id=dict(required=False, aliases=['instance_id']),
        public_ip=dict(required=False, aliases=['ip']),
        state=dict(required=False, default='present',
                   choices=['present', 'absent']),
        in_vpc=dict(required=False, type='bool', default=False),
        reuse_existing_ip_allowed=dict(required=False, type='bool',
                                       default=False),
        release_on_disassociation=dict(required=False, type='bool', default=False),
        wait_timeout=dict(default=300),
    ))

@@ -264,28 +320,52 @@ def main():

    ec2 = ec2_connect(module)

    instance_id = module.params.get('instance_id')
    device_id = module.params.get('device_id')
    public_ip = module.params.get('public_ip')
    state = module.params.get('state')
    in_vpc = module.params.get('in_vpc')
    domain = 'vpc' if in_vpc else None
    reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
    release_on_disassociation = module.params.get('release_on_disassociation')

    if device_id and device_id.startswith('i-'):
        is_instance = True
    elif device_id:
        is_instance = False

    try:
        address = find_address(ec2, public_ip, instance_id)
        if device_id:
            address = find_address(ec2, public_ip, device_id, isinstance=is_instance)
        else:
            address = False

        if state == 'present':
            result = ensure_present(ec2, domain, address, instance_id,
            if device_id:
                result = ensure_present(ec2, domain, address, device_id,
                                        reuse_existing_ip_allowed,
                                        module.check_mode)
                                        module.check_mode, isinstance=is_instance)
            else:
                address = allocate_address(ec2, domain, reuse_existing_ip_allowed)
                result = {'changed': True, 'public_ip': address.public_ip}
        else:
            result = ensure_absent(ec2, domain, address, instance_id, module.check_mode)
            if device_id:
                disassociated = ensure_absent(ec2, domain, address, device_id, module.check_mode, isinstance=is_instance)

                if release_on_disassociation and disassociated['changed']:
                    released = release_address(ec2, address, module.check_mode)
                    result = { 'changed': True, 'disassociated': disassociated, 'released': released }
                else:
                    result = { 'changed': disassociated['changed'], 'disassociated': disassociated, 'released': { 'changed': False } }
            else:
                address = find_address(ec2, public_ip, None)
                released = release_address(ec2, address, module.check_mode)
                result = { 'changed': released['changed'], 'disassociated': { 'changed': False }, 'released': released }

    except (boto.exception.EC2ResponseError, EIPException) as e:
        module.fail_json(msg=str(e))

    module.exit_json(**result)


# import module snippets
from ansible.module_utils.basic import *  # noqa
from ansible.module_utils.ec2 import *  # noqa

@@ -22,7 +22,9 @@ description:
    - Will be marked changed when called only if state is changed.
short_description: Creates or destroys Amazon ELB.
version_added: "1.5"
author: "Jim Dalton (@jsdalton)"
author:
  - "Jim Dalton (@jsdalton)"
  - "Rick Mendes (@rickmendes)"
options:
  state:
    description:

@@ -56,6 +58,12 @@ options:
    require: false
    default: None
    version_added: "1.6"
  security_group_names:
    description:
      - A list of security group names to apply to the elb
    require: false
    default: None
    version_added: "2.0"
  health_check:
    description:
      - An associative array of health check configuration settings (see example)

@@ -68,7 +76,7 @@ options:
    aliases: ['aws_region', 'ec2_region']
  subnets:
    description:
      - A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
      - A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
    required: false
    default: None
    aliases: []

@@ -77,7 +85,7 @@ options:
    description:
      - Purge existing subnet on ELB that are not found in subnets
    required: false
    default: false
    default: false
    version_added: "1.7"
  scheme:
    description:

@@ -147,7 +155,7 @@ EXAMPLES = """
    name: "test-vpc"
    scheme: internal
    state: present
    subnets:
    subnets:
      - subnet-abcd1234
      - subnet-1a2b3c4d
    listeners:

@@ -213,7 +221,7 @@ EXAMPLES = """
        instance_port: 80
    purge_zones: yes

# Creates a ELB and assigns a list of subnets to it.
# Creates a ELB and assigns a list of subnets to it.
- local_action:
    module: ec2_elb_lb
    state: present

@@ -297,10 +305,10 @@ class ElbManager(object):
    """Handles ELB creation and destruction"""

    def __init__(self, module, name, listeners=None, purge_listeners=None,
                 zones=None, purge_zones=None, security_group_ids=None,
                 zones=None, purge_zones=None, security_group_ids=None,
                 health_check=None, subnets=None, purge_subnets=None,
                 scheme="internet-facing", connection_draining_timeout=None,
                 cross_az_load_balancing=None,
                 cross_az_load_balancing=None,
                 stickiness=None, region=None, **aws_connect_params):

        self.module = module

@@ -361,7 +369,8 @@ class ElbManager(object):
        if not check_elb:
            info = {
                'name': self.name,
                'status': self.status
                'status': self.status,
                'region': self.region
            }
        else:
            try:

@@ -384,9 +393,34 @@ class ElbManager(object):
                'hosted_zone_name': check_elb.canonical_hosted_zone_name,
                'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
                'lb_cookie_policy': lb_cookie_policy,
                'app_cookie_policy': app_cookie_policy
                'app_cookie_policy': app_cookie_policy,
                'instances': [instance.id for instance in check_elb.instances],
                'out_of_service_count': 0,
                'in_service_count': 0,
                'unknown_instance_state_count': 0,
                'region': self.region
            }

            # status of instances behind the ELB
            if info['instances']:
                info['instance_health'] = [ dict(
                    instance_id = instance_state.instance_id,
                    reason_code = instance_state.reason_code,
                    state = instance_state.state
                ) for instance_state in self.elb_conn.describe_instance_health(self.name)]
            else:
                info['instance_health'] = []

            # instance state counts: InService or OutOfService
            if info['instance_health']:
                for instance_state in info['instance_health']:
                    if instance_state['state'] == "InService":
                        info['in_service_count'] += 1
                    elif instance_state['state'] == "OutOfService":
                        info['out_of_service_count'] += 1
                    else:
                        info['unknown_instance_state_count'] += 1

            if check_elb.health_check:
                info['health_check'] = {
                    'target': check_elb.health_check.target,
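The expanded get_info above derives the in/out-of-service counters from describe_instance_health, which returns one InstanceState per registered instance. A hedged sketch of that tallying on its own (connection setup omitted; elb name illustrative):

    # Illustrative: elb_conn is a boto ELB connection, 'example-elb' exists.
    states = elb_conn.describe_instance_health('example-elb')
    counts = {'InService': 0, 'OutOfService': 0, 'unknown': 0}
    for instance_state in states:
        key = instance_state.state if instance_state.state in counts else 'unknown'
        counts[key] += 1
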
@@ -418,7 +452,7 @@ class ElbManager(object):
        else:
            info['cross_az_load_balancing'] = 'no'

        # return stickiness info?
        # return stickiness info?

        return info

@@ -539,8 +573,8 @@ class ElbManager(object):
        # N.B. string manipulations on protocols below (str(), upper()) is to
        # ensure format matches output from ELB API
        listener_list = [
            listener['load_balancer_port'],
            listener['instance_port'],
            int(listener['load_balancer_port']),
            int(listener['instance_port']),
            str(listener['protocol'].upper()),
        ]

@@ -598,7 +632,7 @@ class ElbManager(object):
            self._attach_subnets(subnets_to_attach)
        if subnets_to_detach:
            self._detach_subnets(subnets_to_detach)


    def _set_zones(self):
        """Determine which zones need to be enabled or disabled on the ELB"""
        if self.zones:

@@ -703,7 +737,7 @@ class ElbManager(object):
            else:
                self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
                self.changed = True

            self._set_listener_policy(listeners_dict, policy)

    def select_stickiness_policy(self):

@@ -770,7 +804,7 @@ class ElbManager(object):

        else:
            self._set_listener_policy(listeners_dict)


    def _get_health_check_target(self):
        """Compose target string from healthcheck parameters"""
        protocol = self.health_check['ping_protocol'].upper()

@@ -792,6 +826,7 @@ def main():
        zones={'default': None, 'required': False, 'type': 'list'},
        purge_zones={'default': False, 'required': False, 'type': 'bool'},
        security_group_ids={'default': None, 'required': False, 'type': 'list'},
        security_group_names={'default': None, 'required': False, 'type': 'list'},
        health_check={'default': None, 'required': False, 'type': 'dict'},
        subnets={'default': None, 'required': False, 'type': 'list'},
        purge_subnets={'default': False, 'required': False, 'type': 'bool'},

@@ -804,6 +839,7 @@ def main():

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive = [['security_group_ids', 'security_group_names']]
    )

    if not HAS_BOTO:

@@ -820,6 +856,7 @@ def main():
    zones = module.params['zones']
    purge_zones = module.params['purge_zones']
    security_group_ids = module.params['security_group_ids']
    security_group_names = module.params['security_group_names']
    health_check = module.params['health_check']
    subnets = module.params['subnets']
    purge_subnets = module.params['purge_subnets']

@@ -834,6 +871,21 @@ def main():
    if state == 'present' and not (zones or subnets):
        module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")

    if security_group_names:
        security_group_ids = []
        try:
            ec2 = ec2_connect(module)
            grp_details = ec2.get_all_security_groups()

            for group_name in security_group_names:
                if isinstance(group_name, basestring):
                    group_name = [group_name]

                group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
                security_group_ids.extend(group_id)
        except boto.exception.NoAuthHandlerFound, e:
            module.fail_json(msg = str(e))

    elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
                         purge_zones, security_group_ids, health_check,
                         subnets, purge_subnets, scheme,

@@ -29,7 +29,7 @@ options:
    required: false
    default: 'yes'
    choices: ['yes', 'no']
    version_added: 1.5.1
    version_added: '1.5.1'
description:
  - This module fetches data from the metadata servers in ec2 (aws) as per
    http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html.

@@ -1,6 +1,19 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
@@ -336,19 +349,24 @@ def main():
                rule['from_port'] = None
                rule['to_port'] = None

            # If rule already exists, don't later delete it
            ruleId = make_rule_key('in', rule, group_id, ip)
            if ruleId in groupRules:
                del groupRules[ruleId]
            # Otherwise, add new rule
            else:
                grantGroup = None
                if group_id:
                    grantGroup = groups[group_id]
            # Convert ip to list we can iterate over
            if not isinstance(ip, list):
                ip = [ip]

                if not module.check_mode:
                    group.authorize(rule['proto'], rule['from_port'], rule['to_port'], ip, grantGroup)
                changed = True
            # If rule already exists, don't later delete it
            for thisip in ip:
                ruleId = make_rule_key('in', rule, group_id, thisip)
                if ruleId in groupRules:
                    del groupRules[ruleId]
                # Otherwise, add new rule
                else:
                    grantGroup = None
                    if group_id:
                        grantGroup = groups[group_id]

                    if not module.check_mode:
                        group.authorize(rule['proto'], rule['from_port'], rule['to_port'], thisip, grantGroup)
                    changed = True

        # Finally, remove anything left in the groupRules -- these will be defunct rules
        if purge_rules:

@@ -383,25 +401,30 @@ def main():
                rule['from_port'] = None
                rule['to_port'] = None

            # If rule already exists, don't later delete it
            ruleId = make_rule_key('out', rule, group_id, ip)
            if ruleId in groupRules:
                del groupRules[ruleId]
            # Otherwise, add new rule
            else:
                grantGroup = None
                if group_id:
                    grantGroup = groups[group_id].id
            # Convert ip to list we can iterate over
            if not isinstance(ip, list):
                ip = [ip]

                if not module.check_mode:
                    ec2.authorize_security_group_egress(
                        group_id=group.id,
                        ip_protocol=rule['proto'],
                        from_port=rule['from_port'],
                        to_port=rule['to_port'],
                        src_group_id=grantGroup,
                        cidr_ip=ip)
                changed = True
            # If rule already exists, don't later delete it
            for thisip in ip:
                ruleId = make_rule_key('out', rule, group_id, thisip)
                if ruleId in groupRules:
                    del groupRules[ruleId]
                # Otherwise, add new rule
                else:
                    grantGroup = None
                    if group_id:
                        grantGroup = groups[group_id].id

                    if not module.check_mode:
                        ec2.authorize_security_group_egress(
                            group_id=group.id,
                            ip_protocol=rule['proto'],
                            from_port=rule['from_port'],
                            to_port=rule['to_port'],
                            src_group_id=grantGroup,
                            cidr_ip=thisip)
                        changed = True
            elif vpc_id and not module.check_mode:
                # when using a vpc, but no egress rules are specified,
                # we add in a default allow all out rule, which was the

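Both rewritten loops above apply the same fix: a rule's cidr_ip may be a single string or a list, so it is normalized to a list and each address is keyed and authorized individually. The normalization idiom in isolation (rule values illustrative):

    # Illustrative rule: cidr_ip may be a string or a list of CIDR blocks.
    rule = {'proto': 'tcp', 'from_port': 22, 'to_port': 22,
            'cidr_ip': ['10.0.0.0/8', '192.168.0.0/16']}

    ip = rule['cidr_ip']
    if not isinstance(ip, list):   # a single string becomes a one-element list
        ip = [ip]
    for thisip in ip:
        pass  # build the rule key / call authorize for each CIDR separately
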
@@ -1,6 +1,19 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
@@ -127,25 +140,23 @@ def main():
    if state == 'absent':
        if key:
            '''found a match, delete it'''
            try:
                key.delete()
                if wait:
                    start = time.time()
                    action_complete = False
                    while (time.time() - start) < wait_timeout:
                        if not ec2.get_key_pair(name):
                            action_complete = True
                            break
                        time.sleep(1)
                    if not action_complete:
                        module.fail_json(msg="timed out while waiting for the key to be removed")
            except Exception, e:
                module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e))
            else:
                key = None
                changed = True
        else:
            '''no match found, no changes required'''
            if not module.check_mode:
                try:
                    key.delete()
                    if wait:
                        start = time.time()
                        action_complete = False
                        while (time.time() - start) < wait_timeout:
                            if not ec2.get_key_pair(name):
                                action_complete = True
                                break
                            time.sleep(1)
                        if not action_complete:
                            module.fail_json(msg="timed out while waiting for the key to be removed")
                except Exception, e:
                    module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e))
            key = None
            changed = True

    # Ensure requested key is present
    elif state == 'present':

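The rewritten absent branch wraps the deletion in "if not module.check_mode:" so a dry run reports the change without touching AWS. The general pattern, reduced to its skeleton (surrounding module setup assumed):

    # Sketch of the check-mode guard used above: report the change,
    # but only perform the destructive call outside of --check runs.
    if key:
        if not module.check_mode:
            key.delete()          # real deletion only when not in check mode
        changed = True            # check mode still reports the pending change
    module.exit_json(changed=changed)
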
@ -77,7 +77,7 @@ options:
|
|||
- Kernel id for the EC2 instance
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
aliases: []
|
||||
spot_price:
|
||||
description:
|
||||
- The spot price you are bidding. Only applies for an autoscaling group with spot instances.
|
||||
|
@ -116,6 +116,18 @@ options:
|
|||
default: false
|
||||
aliases: []
|
||||
version_added: "1.8"
|
||||
classic_link_vpc_id:
|
||||
description:
|
||||
- Id of ClassicLink enabled VPC
|
||||
required: false
|
||||
default: null
|
||||
version_added: "2.0"
|
||||
classic_link_vpc_security_groups:
|
||||
description:
|
||||
- A list of security group id's with which to associate the ClassicLink VPC instances.
|
||||
required: false
|
||||
default: null
|
||||
version_added: "2.0"
|
||||
extends_documentation_fragment: aws
|
||||
"""
|
||||
|
||||
|
@@ -184,6 +196,8 @@ def create_launch_config(connection, module):
    ramdisk_id = module.params.get('ramdisk_id')
    instance_profile_name = module.params.get('instance_profile_name')
    ebs_optimized = module.params.get('ebs_optimized')
    classic_link_vpc_id = module.params.get('classic_link_vpc_id')
    classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups')
    bdm = BlockDeviceMapping()

    if volumes:
@@ -206,10 +220,12 @@ def create_launch_config(connection, module):
        kernel_id=kernel_id,
        spot_price=spot_price,
        instance_monitoring=instance_monitoring,
        associate_public_ip_address = assign_public_ip,
        associate_public_ip_address=assign_public_ip,
        ramdisk_id=ramdisk_id,
        instance_profile_name=instance_profile_name,
        ebs_optimized=ebs_optimized,
        classic_link_vpc_security_groups=classic_link_vpc_security_groups,
        classic_link_vpc_id=classic_link_vpc_id,
    )

    launch_configs = connection.get_all_launch_configurations(names=[name])
@@ -221,11 +237,37 @@ def create_launch_config(connection, module):
            changed = True
        except BotoServerError, e:
            module.fail_json(msg=str(e))
    result = launch_configs[0]

    module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time),
                     image_id=result.image_id, arn=result.launch_configuration_arn,
                     security_groups=result.security_groups, instance_type=instance_type)
    result = dict(
        ((a[0], a[1]) for a in vars(launch_configs[0]).items()
         if a[0] not in ('connection', 'created_time', 'instance_monitoring', 'block_device_mappings'))
    )
    result['created_time'] = str(launch_configs[0].created_time)
    # Looking at boto's launchconfig.py, it looks like this could be a boolean
    # value or an object with an enabled attribute. The enabled attribute
    # could be a boolean or a string representation of a boolean. Since
    # I can't test all permutations myself to see if my reading of the code is
    # correct, I have to code this *very* defensively.
    if launch_configs[0].instance_monitoring is True:
        result['instance_monitoring'] = True
    else:
        try:
            result['instance_monitoring'] = module.boolean(launch_configs[0].instance_monitoring.enabled)
        except AttributeError:
            result['instance_monitoring'] = False
    if launch_configs[0].block_device_mappings is not None:
        result['block_device_mappings'] = []
        for bdm in launch_configs[0].block_device_mappings:
            result['block_device_mappings'].append(dict(device_name=bdm.device_name, virtual_name=bdm.virtual_name))
            if bdm.ebs is not None:
                result['block_device_mappings'][-1]['ebs'] = dict(snapshot_id=bdm.ebs.snapshot_id, volume_size=bdm.ebs.volume_size)

    module.exit_json(changed=changed, name=result['name'], created_time=result['created_time'],
                     image_id=result['image_id'], arn=result['launch_configuration_arn'],
                     security_groups=result['security_groups'],
                     instance_type=result['instance_type'],
                     result=result)


def delete_launch_config(connection, module):
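The defensive instance_monitoring handling above can be exercised in isolation. A minimal sketch of the same normalization, where _to_bool stands in for module.boolean and both names are illustrative:

def _to_bool(value):
    # string-aware stand-in for AnsibleModule.boolean()
    if isinstance(value, basestring):
        return value.strip().lower() in ('yes', 'true', '1', 'on')
    return bool(value)

def normalize_instance_monitoring(value):
    # boto may hand back True/False directly, or an object whose
    # .enabled attribute is a bool or a string such as 'true'.
    if value is True:
        return True
    try:
        return _to_bool(value.enabled)
    except AttributeError:
        return False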
@@ -257,7 +299,9 @@ def main():
        ebs_optimized=dict(default=False, type='bool'),
        associate_public_ip_address=dict(type='bool'),
        instance_monitoring=dict(default=False, type='bool'),
        assign_public_ip=dict(type='bool')
        assign_public_ip=dict(type='bool'),
        classic_link_vpc_security_groups=dict(type='list'),
        classic_link_vpc_id=dict(type='str')
        )
    )
@@ -184,7 +184,7 @@ def create_metric_alarm(connection, module):
    comparisons = {'<=': 'LessThanOrEqualToThreshold', '<': 'LessThanThreshold', '>=': 'GreaterThanOrEqualToThreshold', '>': 'GreaterThanThreshold'}
    alarm.comparison = comparisons[comparison]

    dim1 = module.params.get('dimensions')
    dim1 = module.params.get('dimensions', {})
    dim2 = alarm.dimensions

    for keys in dim1:
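One caveat with the .get('dimensions', {}) default above: with AnsibleModule, an omitted option usually arrives as an existing key whose value is None, and dict.get only falls back when the key is absent. A small sketch of the difference, using a plain dict for illustration:

params = {'dimensions': None}           # how an omitted module option typically arrives

dim1 = params.get('dimensions', {})     # still None: the key exists
dim1 = params.get('dimensions') or {}   # also covers an explicit None

for key in dim1:                        # safe once dim1 is guaranteed to be a dict
    pass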
@@ -1,4 +1,18 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = """
module: ec2_scaling_policy
@@ -74,6 +74,12 @@ options:
      - snapshot id to remove
    required: false
    version_added: "1.9"
  last_snapshot_min_age:
    description:
      - If the volume's most recent snapshot has started less than C(last_snapshot_min_age) minutes ago, a new snapshot will not be created.
    required: false
    default: 0
    version_added: "1.9"

author: "Will Thames (@willthames)"
extends_documentation_fragment: aws
@@ -82,7 +88,7 @@ extends_documentation_fragment: aws
EXAMPLES = '''
# Simple snapshot of volume using volume_id
- ec2_snapshot:
    volume_id: vol-abcdef12
    description: snapshot of /data from DB123 taken 2013/11/28 12:18:32

# Snapshot of volume mounted on device_name attached to instance_id
@@ -104,9 +110,16 @@ EXAMPLES = '''
    module: ec2_snapshot
    snapshot_id: snap-abcd1234
    state: absent
'''

# Create a snapshot only if the most recent one is older than 1 hour
- local_action:
    module: ec2_snapshot
    volume_id: vol-abcdef12
    last_snapshot_min_age: 60
'''

import time
import datetime

try:
    import boto.ec2
@@ -115,7 +128,128 @@ except ImportError:
    HAS_BOTO = False


def main():
# Find the most recent snapshot
def _get_snapshot_starttime(snap):
    return datetime.datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.000Z')


def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
    """
    Gets the most recently created snapshot and optionally filters the result
    if the snapshot is too old
    :param snapshots: list of snapshots to search
    :param max_snapshot_age_secs: filter the result if it's older than this
    :param now: simulate time -- used for unit testing
    :return:
    """
    if len(snapshots) == 0:
        return None

    if not now:
        now = datetime.datetime.utcnow()

    youngest_snapshot = max(snapshots, key=_get_snapshot_starttime)

    # See if the snapshot is younger than the given max age
    snapshot_start = datetime.datetime.strptime(youngest_snapshot.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
    snapshot_age = now - snapshot_start

    if max_snapshot_age_secs is not None:
        if snapshot_age.total_seconds() > max_snapshot_age_secs:
            return None

    return youngest_snapshot


def _create_with_wait(snapshot, wait_timeout_secs, sleep_func=time.sleep):
    """
    Wait for the snapshot to be created
    :param snapshot:
    :param wait_timeout_secs: fail this step after this many seconds
    :param sleep_func:
    :return:
    """
    time_waited = 0
    snapshot.update()
    while snapshot.status != 'completed':
        sleep_func(3)
        snapshot.update()
        time_waited += 3
        if wait_timeout_secs and time_waited > wait_timeout_secs:
            return False
    return True


def create_snapshot(module, ec2, state=None, description=None, wait=None,
                    wait_timeout=None, volume_id=None, instance_id=None,
                    snapshot_id=None, device_name=None, snapshot_tags=None,
                    last_snapshot_min_age=None):
    snapshot = None
    changed = False

    required = [volume_id, snapshot_id, instance_id]
    if required.count(None) != len(required) - 1:  # only 1 must be set
        module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
    if instance_id and not device_name or device_name and not instance_id:
        module.fail_json(msg='Instance ID and device name must both be specified')

    if instance_id:
        try:
            volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
        except boto.exception.BotoServerError, e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

        if not volumes:
            module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))

        volume_id = volumes[0].id

    if state == 'absent':
        if not snapshot_id:
            module.fail_json(msg='snapshot_id must be set when state is absent')
        try:
            ec2.delete_snapshot(snapshot_id)
        except boto.exception.BotoServerError, e:
            # exception is raised if snapshot does not exist
            if e.error_code == 'InvalidSnapshot.NotFound':
                module.exit_json(changed=False)
            else:
                module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

        # successful delete
        module.exit_json(changed=True)

    if last_snapshot_min_age > 0:
        try:
            current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id})
        except boto.exception.BotoServerError, e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

        last_snapshot_min_age = last_snapshot_min_age * 60  # Convert to seconds
        snapshot = _get_most_recent_snapshot(current_snapshots,
                                             max_snapshot_age_secs=last_snapshot_min_age)
    try:
        # Create a new snapshot if we didn't find an existing one to use
        if snapshot is None:
            snapshot = ec2.create_snapshot(volume_id, description=description)
            changed = True
        if wait:
            if not _create_with_wait(snapshot, wait_timeout):
                module.fail_json(msg='Timed out while creating snapshot.')
        if snapshot_tags:
            for k, v in snapshot_tags.items():
                snapshot.add_tag(k, v)
    except boto.exception.BotoServerError, e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    module.exit_json(changed=changed,
                     snapshot_id=snapshot.id,
                     volume_id=snapshot.volume_id,
                     volume_size=snapshot.volume_size,
                     tags=snapshot.tags.copy())
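The now parameter of _get_most_recent_snapshot makes the age filter easy to exercise without AWS, as the docstring's unit-testing hook suggests. A minimal sketch with stub snapshot objects; the stub class is illustrative:

import datetime

class StubSnapshot(object):
    # minimal stand-in for a boto snapshot; only start_time is consulted
    def __init__(self, start_time):
        self.start_time = start_time

now = datetime.datetime(2015, 6, 1, 12, 0, 0)
snaps = [StubSnapshot('2015-06-01T11:30:00.000Z'),
         StubSnapshot('2015-06-01T09:00:00.000Z')]

# Within a 1 hour window the 11:30 snapshot qualifies, so no new one is needed.
assert _get_most_recent_snapshot(snaps, max_snapshot_age_secs=3600, now=now) is snaps[0]
# Within a 10 minute window nothing qualifies, so the caller would create one.
assert _get_most_recent_snapshot(snaps, max_snapshot_age_secs=600, now=now) is None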


def create_snapshot_ansible_module():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
@@ -124,13 +258,19 @@ def main():
            instance_id = dict(),
            snapshot_id = dict(),
            device_name = dict(),
            wait = dict(type='bool', default='true'),
            wait_timeout = dict(default=0),
            wait = dict(type='bool', default=True),
            wait_timeout = dict(type='int', default=0),
            last_snapshot_min_age = dict(type='int', default=0),
            snapshot_tags = dict(type='dict', default=dict()),
            state = dict(choices=['absent','present'], default='present'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    return module


def main():
    module = create_snapshot_ansible_module()

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
@@ -142,60 +282,30 @@ def main():
    device_name = module.params.get('device_name')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    last_snapshot_min_age = module.params.get('last_snapshot_min_age')
    snapshot_tags = module.params.get('snapshot_tags')
    state = module.params.get('state')

    if not volume_id and not instance_id and not snapshot_id or volume_id and instance_id and snapshot_id:
        module.fail_json('One and only one of volume_id or instance_id or snapshot_id must be specified')
    if instance_id and not device_name or device_name and not instance_id:
        module.fail_json('Instance ID and device name must both be specified')

    ec2 = ec2_connect(module)

    if instance_id:
        try:
            volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
            if not volumes:
                module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
            volume_id = volumes[0].id
        except boto.exception.BotoServerError, e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    if state == 'absent':
        if not snapshot_id:
            module.fail_json(msg='snapshot_id must be set when state is absent')
        try:
            snapshots = ec2.get_all_snapshots([snapshot_id])
            ec2.delete_snapshot(snapshot_id)
            module.exit_json(changed=True)
        except boto.exception.BotoServerError, e:
            # exception is raised if snapshot does not exist
            if e.error_code == 'InvalidSnapshot.NotFound':
                module.exit_json(changed=False)
            else:
                module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    try:
        snapshot = ec2.create_snapshot(volume_id, description=description)
        time_waited = 0
        if wait:
            snapshot.update()
            while snapshot.status != 'completed':
                time.sleep(3)
                snapshot.update()
                time_waited += 3
                if wait_timeout and time_waited > wait_timeout:
                    module.fail_json('Timed out while creating snapshot.')
        for k, v in snapshot_tags.items():
            snapshot.add_tag(k, v)
    except boto.exception.BotoServerError, e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    module.exit_json(changed=True, snapshot_id=snapshot.id, volume_id=snapshot.volume_id,
                     volume_size=snapshot.volume_size, tags=snapshot.tags.copy())
    create_snapshot(
        module=module,
        state=state,
        description=description,
        wait=wait,
        wait_timeout=wait_timeout,
        ec2=ec2,
        volume_id=volume_id,
        instance_id=instance_id,
        snapshot_id=snapshot_id,
        device_name=device_name,
        snapshot_tags=snapshot_tags,
        last_snapshot_min_age=last_snapshot_min_age
    )

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
if __name__ == '__main__':
    main()
@@ -140,12 +140,13 @@ EXAMPLES = '''
- ec2_vol:
    instance: "{{ item.id }} "
    volume_size: 5
  with_items: ec2.instances
  register: ec2_vol

# Example: Launch an instance and then add a volume if not already attached
#   * Volume will be created with the given name if not already created.
#   * Nothing will happen if the volume is already attached.
#   * Requires Ansible 2.0

- ec2:
    keypair: "{{ keypair }}"
@@ -436,11 +437,11 @@ def main():

    # Delaying the checks until after the instance check allows us to get volume ids for existing volumes
    # without needing to pass an unused volume_size
    if not volume_size and not (id or name):
        module.fail_json(msg="You must specify an existing volume with id or name or a volume_size")
    if not volume_size and not (id or name or snapshot):
        module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")

    if volume_size and id:
        module.fail_json(msg="Cannot specify volume_size and id")
    if volume_size and (id or snapshot):
        module.fail_json(msg="Cannot specify volume_size together with id or snapshot")

    if state == 'absent':
        delete_volume(module, ec2)
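The widened validation above now treats snapshot as a third way to identify the volume source. A compact sketch of the same rules as a standalone check; the function name is illustrative:

def validate_volume_args(volume_size, id, name, snapshot):
    # returns an error message, or None when the combination is acceptable
    if not volume_size and not (id or name or snapshot):
        return "You must specify volume_size or identify an existing volume by id, name, or snapshot"
    if volume_size and (id or snapshot):
        return "Cannot specify volume_size together with id or snapshot"
    return None

assert validate_volume_args(None, None, None, None) is not None      # nothing identifies a volume
assert validate_volume_args(5, 'vol-123456', None, None) is not None  # size conflicts with id
assert validate_volume_args(None, None, None, 'snap-123456') is None  # snapshot alone is fine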
cloud/amazon/ec2_vpc_net.py  (new file, 295 lines)

@@ -0,0 +1,295 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: ec2_vpc_net
short_description: Configure AWS virtual private clouds
description:
    - Create or terminate AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "2.0"
author: Jonathan Davila (@defionscode)
options:
  name:
    description:
      - The name to give your VPC. This is used in combination with the cidr_block parameter to determine if a VPC already exists.
    required: yes
  cidr_block:
    description:
      - The CIDR of the VPC
    required: yes
  tenancy:
    description:
      - Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
    required: false
    default: default
    choices: [ 'default', 'dedicated' ]
  dns_support:
    description:
      - Whether to enable AWS DNS support.
    required: false
    default: yes
    choices: [ 'yes', 'no' ]
  dns_hostnames:
    description:
      - Whether to enable AWS hostname support.
    required: false
    default: yes
    choices: [ 'yes', 'no' ]
  dhcp_opts_id:
    description:
      - The id of the DHCP options to use for this VPC
    default: null
    required: false
  tags:
    description:
      - The tags you want attached to the VPC. This is independent of the name value; note that if you pass a 'Name' key it will override the name of the VPC if the two differ.
    default: None
    required: false
    aliases: [ 'resource_tags' ]
  state:
    description:
      - The state of the VPC. Either absent or present.
    default: present
    required: false
    choices: [ 'present', 'absent' ]
  multi_ok:
    description:
      - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created.
    default: false
    required: false

extends_documentation_fragment: aws
'''

EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.

# Create a VPC with dedicated tenancy and a couple of tags

- ec2_vpc_net:
    name: Module_dev2
    cidr_block: 10.10.0.0/16
    region: us-east-1
    tags:
      module: ec2_vpc_net
      this: works
    tenancy: dedicated

'''

import time
import sys

try:
    import boto
    import boto.ec2
    import boto.vpc
    from boto.exception import BotoServerError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False


def boto_exception(err):
    '''generic error message handler'''
    if hasattr(err, 'error_message'):
        error = err.error_message
    elif hasattr(err, 'message'):
        error = err.message
    else:
        error = '%s: %s' % (Exception, err)

    return error


def vpc_exists(module, vpc, name, cidr_block, multi):
    """Returns the matching VPC object, or None if no match is found. When supplied
    with a name and CIDR it checks the Name tag and CIDR block to determine a match;
    otherwise it assumes the VPC does not exist and returns None.
    """
    matched_vpc = None

    try:
        matching_vpcs = vpc.get_all_vpcs(filters={'tag:Name': name, 'cidr-block': cidr_block})
    except Exception, e:
        e_msg = boto_exception(e)
        module.fail_json(msg=e_msg)

    if len(matching_vpcs) == 1:
        matched_vpc = matching_vpcs[0]
    elif len(matching_vpcs) > 1:
        if not multi:
            module.fail_json(msg='Currently there are %d VPCs that have the same name and '
                                 'CIDR block you specified. If you would like to create '
                                 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))

    return matched_vpc


def update_vpc_tags(vpc, module, vpc_obj, tags, name):

    if tags is None:
        tags = dict()

    tags.update({'Name': name})
    try:
        current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id}))
        if cmp(tags, current_tags):
            vpc.create_tags(vpc_obj.id, tags)
            return True
        else:
            return False
    except Exception, e:
        e_msg = boto_exception(e)
        module.fail_json(msg=e_msg)


def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):

    if vpc_obj.dhcp_options_id != dhcp_id:
        connection.associate_dhcp_options(dhcp_id, vpc_obj.id)
        return True
    else:
        return False


def get_vpc_values(vpc_obj):

    if vpc_obj is not None:
        vpc_values = vpc_obj.__dict__
        if "region" in vpc_values:
            vpc_values.pop("region")
        if "item" in vpc_values:
            vpc_values.pop("item")
        if "connection" in vpc_values:
            vpc_values.pop("connection")
        return vpc_values
    else:
        return None


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name = dict(type='str', default=None, required=True),
        cidr_block = dict(type='str', default=None, required=True),
        tenancy = dict(choices=['default', 'dedicated'], default='default'),
        dns_support = dict(type='bool', default=True),
        dns_hostnames = dict(type='bool', default=True),
        dhcp_opts_id = dict(type='str', default=None, required=False),
        tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']),
        state = dict(choices=['present', 'absent'], default='present'),
        multi_ok = dict(type='bool', default=False)
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    name = module.params.get('name')
    cidr_block = module.params.get('cidr_block')
    tenancy = module.params.get('tenancy')
    dns_support = module.params.get('dns_support')
    dns_hostnames = module.params.get('dns_hostnames')
    dhcp_id = module.params.get('dhcp_opts_id')
    tags = module.params.get('tags')
    state = module.params.get('state')
    multi = module.params.get('multi_ok')

    changed = False

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    if dns_hostnames and not dns_support:
        module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')

    if state == 'present':

        # Check if VPC exists
        vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_obj is None:
            try:
                vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy)
                changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)

        if dhcp_id is not None:
            try:
                if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
                    changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)

        if tags is not None or name is not None:
            try:
                if update_vpc_tags(connection, module, vpc_obj, tags, name):
                    changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)

        # Note: Boto doesn't currently provide an interface to ec2-describe-vpc-attribute,
        # which is needed in order to detect the current status of the DNS options. For now
        # we just update the attributes each time; this is not used as a changed-factor.
        try:
            connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support)
            connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames)
        except BotoServerError, e:
            e_msg = boto_exception(e)
            module.fail_json(msg=e_msg)

        # get the vpc obj again in case it has changed
        try:
            vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
        except BotoServerError, e:
            e_msg = boto_exception(e)
            module.fail_json(msg=e_msg)

        module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))

    elif state == 'absent':

        # Check if VPC exists
        vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_obj is not None:
            try:
                connection.delete_vpc(vpc_obj.id)
                vpc_obj = None
                changed = True
            except BotoServerError, e:
                e_msg = boto_exception(e)
                module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
                                     "and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)

        module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
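The idempotency check in update_vpc_tags hinges on Python 2's cmp() returning 0 for equal dicts. A short sketch of that comparison, alongside the dict-inequality form that behaves the same and also survives Python 3; the values are illustrative:

desired = {'Name': 'Module_dev2', 'module': 'ec2_vpc_net'}
current = {'Name': 'Module_dev2'}

# Python 2 form used above: cmp() is 0 when equal, so a truthy result means drift.
# needs_update = bool(cmp(desired, current))

# Equivalent comparison without cmp():
needs_update = desired != current
assert needs_update  # the 'module' tag is missing, so the tags must be rewritten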
@@ -42,7 +42,7 @@ options:
    description:
      - The version number of the cache engine
    required: false
    default: 1.4.14
    default: none
  node_type:
    description:
      - The compute and memory capacity of the nodes in the cache cluster

@@ -56,7 +56,7 @@ options:
    description:
      - The port number on which each of the cache nodes will accept connections
    required: false
    default: 11211
    default: none
  cache_subnet_group:
    description:
      - The subnet group name to associate with. Only use if inside a VPC; required if inside a VPC.
@@ -477,10 +477,10 @@ def main():
        state={'required': True, 'choices': ['present', 'absent', 'rebooted']},
        name={'required': True},
        engine={'required': False, 'default': 'memcached'},
        cache_engine_version={'required': False, 'default': '1.4.14'},
        cache_engine_version={'required': False},
        node_type={'required': False, 'default': 'cache.m1.small'},
        num_nodes={'required': False, 'default': None, 'type': 'int'},
        cache_port={'required': False, 'default': 11211, 'type': 'int'},
        cache_port={'required': False, 'type': 'int'},
        cache_subnet_group={'required': False, 'default': None},
        cache_security_groups={'required': False, 'default': ['default'],
                               'type': 'list'},
@@ -280,12 +280,6 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key
                    module.fail_json(changed=False, msg="Password doesn't conform to policy")
            else:
                module.fail_json(msg=error_msg)
    else:
        try:
            iam.delete_login_profile(name)
            changed = True
        except boto.exception.BotoServerError:
            pass

    if key_state == 'create':
        try:

@@ -509,7 +503,7 @@ def main():
        groups=dict(type='list', default=None, required=False),
        state=dict(
            default=None, required=True, choices=['present', 'absent', 'update']),
        password=dict(default=None, required=False),
        password=dict(default=None, required=False, no_log=True),
        update_password=dict(default='always', required=False, choices=['always', 'on_create']),
        access_key_state=dict(default=None, required=False, choices=[
            'active', 'inactive', 'create', 'remove',
@@ -40,7 +40,12 @@ options:
    aliases: []
  policy_document:
    description:
      - The path to the properly json formatted policy file
      - The path to the properly json formatted policy file (mutually exclusive with C(policy_json))
    required: false
    aliases: []
  policy_json:
    description:
      - A properly json formatted policy as string (mutually exclusive with C(policy_document), see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly)
    required: false
    aliases: []
  state:
    description:
@@ -109,16 +114,29 @@ task:
        state: present
      with_items: new_groups.results

# Create a new S3 policy with prefix per user
tasks:
- name: Create S3 policy from template
  iam_policy:
    iam_type: user
    iam_name: "{{ item.user }}"
    policy_name: "s3_limited_access_{{ item.prefix }}"
    state: present
    policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} "
  with_items:
    - user: s3_user
      prefix: s3_user_prefix

'''
import json
import urllib
import sys
try:
    import boto
    import boto.iam
    import boto.ec2
    HAS_BOTO = True
except ImportError:
    print "failed=True msg='boto required for this module'"
    sys.exit(1)
    HAS_BOTO = False

def boto_exception(err):
    '''generic error message handler'''
@@ -271,6 +289,7 @@ def main():
        iam_name=dict(default=None, required=False),
        policy_name=dict(default=None, required=True),
        policy_document=dict(default=None, required=False),
        policy_json=dict(type='str', default=None, required=False),
        skip_duplicates=dict(type='bool', default=True, required=False)
    ))
@@ -278,26 +297,35 @@ def main():
        argument_spec=argument_spec,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    state = module.params.get('state').lower()
    iam_type = module.params.get('iam_type').lower()
    state = module.params.get('state')
    name = module.params.get('iam_name')
    policy_name = module.params.get('policy_name')
    skip = module.params.get('skip_duplicates')

    if module.params.get('policy_document') != None and module.params.get('policy_json') != None:
        module.fail_json(msg='Only one of "policy_document" or "policy_json" may be set')

    if module.params.get('policy_document') != None:
        with open(module.params.get('policy_document'), 'r') as json_data:
            pdoc = json.dumps(json.load(json_data))
            json_data.close()
    elif module.params.get('policy_json') != None:
        try:
            pdoc = json.dumps(json.loads(module.params.get('policy_json')))
        except Exception as e:
            module.fail_json(msg=str(e) + '\n' + module.params.get('policy_json'))
    else:
        pdoc = None

    ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    try:
        iam = boto.iam.connection.IAMConnection(
            aws_access_key_id=aws_access_key,
            aws_secret_access_key=aws_secret_key,
        )
        iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound, e:
        module.fail_json(msg=str(e))
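The json.dumps(json.loads(...)) round-trip above canonicalizes insignificant whitespace, which makes later string comparison against the policy already stored in IAM (used by skip_duplicates) less fragile. A small sketch; note the round-trip does not sort keys, so two logically identical documents can still compare unequal:

import json

inline = '''{
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Action": "s3:GetObject", "Resource": "*"}]
}'''

pdoc = json.dumps(json.loads(inline))
assert '\n' not in pdoc  # newlines and indentation are gone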
@@ -24,147 +24,123 @@ description:
options:
  command:
    description:
      - Specifies the action to take.
    required: true
    default: null
    aliases: []
    choices: [ 'create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore' ]
    choices: [ 'create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore' ]
  instance_name:
    description:
      - Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
    required: false
    default: null
    aliases: []
  source_instance:
    description:
      - Name of the database to replicate. Used only when command=replicate.
    required: false
    default: null
    aliases: []
  db_engine:
    description:
      - The type of database. Used only when command=create.
    required: false
    default: null
    aliases: []
    choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres' ]
  size:
    description:
      - Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
    required: false
    default: null
    aliases: []
  instance_type:
    description:
      - The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
    required: false
    default: null
    aliases: []
  username:
    description:
      - Master database username. Used only when command=create.
    required: false
    default: null
    aliases: []
  password:
    description:
      - Password for the master database username. Used only when command=create or command=modify.
    required: false
    default: null
    aliases: []
  region:
    description:
      - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
    required: true
    default: null
    aliases: [ 'aws_region', 'ec2_region' ]
  db_name:
    description:
      - Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
    required: false
    default: null
    aliases: []
  engine_version:
    description:
      - Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
    required: false
    default: null
    aliases: []
  parameter_group:
    description:
      - Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
    required: false
    default: null
    aliases: []
  license_model:
    description:
      - The license model for this DB instance. Used only when command=create or command=restore.
    required: false
    default: null
    aliases: []
    choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
  multi_zone:
    description:
      - Specifies if this is a Multi-availability-zone deployment. Cannot be used in conjunction with the zone parameter. Used only when command=create or command=modify.
    choices: [ "yes", "no" ]
    required: false
    default: null
    aliases: []
  iops:
    description:
      - Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
    required: false
    default: null
    aliases: []
  security_groups:
    description:
      - Comma separated list of one or more security groups. Used only when command=create or command=modify.
    required: false
    default: null
    aliases: []
  vpc_security_groups:
    description:
      - Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
    required: false
    default: null
    aliases: []
  port:
    description:
      - Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate.
    required: false
    default: null
    aliases: []
  upgrade:
    description:
      - Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
    required: false
    default: no
    choices: [ "yes", "no" ]
    aliases: []
  option_group:
    description:
      - The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
    required: false
    default: null
    aliases: []
  maint_window:
    description:
      - "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify."
    required: false
    default: null
    aliases: []
  backup_window:
    description:
      - Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
    required: false
    default: null
    aliases: []
  backup_retention:
    description:
      - "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify."
    required: false
    default: null
    aliases: []
  zone:
    description:
      - availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
@@ -176,18 +152,15 @@ options:
      - VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
    required: false
    default: null
    aliases: []
  snapshot:
    description:
      - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
    required: false
    default: null
    aliases: []
  aws_secret_key:
    description:
      - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_secret_key', 'secret_key' ]
  aws_access_key:
    description:
@@ -201,46 +174,46 @@ options:
    required: false
    default: "no"
    choices: [ "yes", "no" ]
    aliases: []
  wait_timeout:
    description:
      - how long before wait gives up, in seconds
    default: 300
    aliases: []
  apply_immediately:
    description:
      - Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window.
    default: no
    choices: [ "yes", "no" ]
    aliases: []
  force_failover:
    description:
      - Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover.
    required: false
    default: "no"
    choices: [ "yes", "no" ]
    version_added: "2.0"
  new_instance_name:
    description:
      - Name to rename an instance to. Used only when command=modify.
    required: false
    default: null
    aliases: []
    version_added: 1.5
    version_added: "1.5"
  character_set_name:
    description:
      - Associate the DB instance with a specified character set. Used with command=create.
    required: false
    default: null
    aliases: []
    version_added: 1.9
    version_added: "1.9"
  publicly_accessible:
    description:
      - explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0
    required: false
    default: null
    aliases: []
    version_added: 1.9
    version_added: "1.9"
  tags:
    description:
      - tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0
    required: false
    default: null
    aliases: []
    version_added: 1.9
    version_added: "1.9"
requirements:
  - "python >= 2.6"
  - "boto"
@@ -292,6 +265,13 @@ EXAMPLES = '''
    instance_name: new-database
    new_instance_name: renamed-database
    wait: yes

# Reboot an instance and wait for it to become available again
- rds:
    command: reboot
    instance_name: database
    wait: yes

'''

import sys
@@ -380,6 +360,13 @@ class RDSConnection:
        except boto.exception.BotoServerError, e:
            raise RDSException(e)

    def reboot_db_instance(self, instance_name, **params):
        try:
            result = self.connection.reboot_dbinstance(instance_name)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError, e:
            raise RDSException(e)

    def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
        try:
            result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
@@ -464,6 +451,13 @@ class RDS2Connection:
        except boto.exception.BotoServerError, e:
            raise RDSException(e)

    def reboot_db_instance(self, instance_name, **params):
        try:
            result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError, e:
            raise RDSException(e)

    def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
        try:
            result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
@@ -616,16 +610,16 @@ def await_resource(conn, resource, status, module):
    while wait_timeout > time.time() and resource.status != status:
        time.sleep(5)
        if wait_timeout <= time.time():
            module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
            module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name)
        if module.params.get('command') == 'snapshot':
            # Temporary until all the rds2 commands have their responses parsed
            if resource.name is None:
                module.fail_json(msg="Problem with snapshot %s" % resource.snapshot)
                module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot)
            resource = conn.get_db_snapshot(resource.name)
        else:
            # Temporary until all the rds2 commands have their responses parsed
            if resource.name is None:
                module.fail_json(msg="Problem with instance %s" % resource.instance)
                module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance)
            resource = conn.get_db_instance(resource.name)
            if resource is None:
                break
@@ -659,7 +653,7 @@ def create_db_instance(module, conn):
                module.params.get('username'), module.params.get('password'), **params)
            changed = True
        except RDSException, e:
            module.fail_json(msg="failed to create instance: %s" % e.message)
            module.fail_json(msg="Failed to create instance: %s" % e.message)

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)

@@ -686,7 +680,7 @@ def replicate_db_instance(module, conn):
            result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
            changed = True
        except RDSException, e:
            module.fail_json(msg="failed to create replica instance: %s " % e.message)
            module.fail_json(msg="Failed to create replica instance: %s " % e.message)

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
@@ -715,14 +709,17 @@ def delete_db_instance_or_snapshot(module, conn):
        if instance_name:
            if snapshot:
                params["skip_final_snapshot"] = False
                params["final_snapshot_id"] = snapshot
                if has_rds2:
                    params["final_db_snapshot_identifier"] = snapshot
                else:
                    params["final_snapshot_id"] = snapshot
            else:
                params["skip_final_snapshot"] = True
            result = conn.delete_db_instance(instance_name, **params)
        else:
            result = conn.delete_db_snapshot(snapshot)
    except RDSException, e:
        module.fail_json(msg="failed to delete instance: %s" % e.message)
        module.fail_json(msg="Failed to delete instance: %s" % e.message)

    # If we're not waiting for a delete to complete then we're all done
    # so just return
@@ -748,11 +745,11 @@ def facts_db_instance_or_snapshot(module, conn):
    snapshot = module.params.get('snapshot')

    if instance_name and snapshot:
        module.fail_json(msg="facts must be called with either instance_name or snapshot, not both")
        module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both")
    if instance_name:
        resource = conn.get_db_instance(instance_name)
        if not resource:
            module.fail_json(msg="DB Instance %s does not exist" % instance_name)
            module.fail_json(msg="DB instance %s does not exist" % instance_name)
    if snapshot:
        resource = conn.get_db_snapshot(snapshot)
        if not resource:
@@ -844,6 +841,31 @@ def snapshot_db_instance(module, conn):
    module.exit_json(changed=changed, snapshot=resource.get_data())


def reboot_db_instance(module, conn):
    required_vars = ['instance_name']
    valid_vars = []

    if has_rds2:
        valid_vars.append('force_failover')

    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')
    result = conn.get_db_instance(instance_name)
    changed = False
    try:
        result = conn.reboot_db_instance(instance_name, **params)
        changed = True
    except RDSException, e:
        module.fail_json(msg=e.message)

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
    else:
        resource = conn.get_db_instance(instance_name)

    module.exit_json(changed=changed, instance=resource.get_data())


def restore_db_instance(module, conn):
    required_vars = ['instance_name', 'snapshot']
    valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
@@ -915,6 +937,7 @@ def validate_parameters(required_vars, valid_vars, module):
        'instance_type': 'db_instance_class',
        'password': 'master_user_password',
        'new_instance_name': 'new_db_instance_identifier',
        'force_failover': 'force_failover',
    }
    if has_rds2:
        optional_params.update(optional_params_rds2)
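A reduced sketch of what the mapping above feeds into: module option names are translated to boto keyword names, and only options that were actually set are copied through. The helper is illustrative, not the module's code:

def map_params(module_params, valid_vars, name_map):
    params = {}
    for opt in valid_vars:
        value = module_params.get(opt)
        if value is not None:
            # fall back to the original name when no translation exists
            params[name_map.get(opt, opt)] = value
    return params

kwargs = map_params({'force_failover': True, 'iops': None},
                    ['force_failover', 'iops'],
                    {'force_failover': 'force_failover'})
assert kwargs == {'force_failover': True}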
@ -957,7 +980,7 @@ def validate_parameters(required_vars, valid_vars, module):
|
|||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore'], required=True),
|
||||
command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
|
||||
instance_name = dict(required=False),
|
||||
source_instance = dict(required=False),
|
||||
db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False),
|
||||
|
@@ -989,6 +1012,7 @@ def main():
        tags = dict(type='dict', required=False),
        publicly_accessible = dict(required=False),
        character_set_name = dict(required=False),
        force_failover = dict(type='bool', required=False, default=False)
        )
    )
@@ -1007,12 +1031,13 @@ def main():
        'modify': modify_db_instance,
        'promote': promote_db_instance,
        'snapshot': snapshot_db_instance,
        'reboot': reboot_db_instance,
        'restore': restore_db_instance,
    }

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region not specified and unable to determine region from EC2_REGION.")
        module.fail_json(msg="Region not specified. Unable to determine region from EC2_REGION.")

    # connect to the rds endpoint
    if has_rds2:
@@ -24,69 +24,61 @@ description:
options:
  command:
    description:
      - Specifies the action to take.
    required: true
    default: null
    aliases: []
    choices: [ 'get', 'create', 'delete' ]
  zone:
    description:
      - The DNS zone to modify
    required: true
  hosted_zone_id:
    description:
      - The Hosted Zone ID of the DNS zone to modify
    required: false
    version_added: "2.0"
    default: null
    aliases: []
  record:
    description:
      - The full DNS record to create or delete
    required: true
    default: null
    aliases: []
  ttl:
    description:
      - The TTL to give the new record
    required: false
    default: 3600 (one hour)
    aliases: []
  type:
    description:
      - The type of DNS record to create
    required: true
    default: null
    aliases: []
    choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' ]
  alias:
    description:
      - Indicates if this is an alias record.
    required: false
    version_added: 1.9
    version_added: "1.9"
    default: False
    aliases: []
    choices: [ 'True', 'False' ]
  alias_hosted_zone_id:
    description:
      - The hosted zone identifier.
    required: false
    version_added: 1.9
    version_added: "1.9"
    default: null
    aliases: []
  value:
    description:
      - The new value when creating a DNS record. Multiple comma-spaced values are allowed for non-alias records. When deleting a record all values for the record must be specified or Route53 will not delete it.
    required: false
    default: null
    aliases: []
  overwrite:
    description:
      - Whether an existing record should be overwritten on create if values do not match
    required: false
    default: null
    aliases: []
  retry_interval:
    description:
      - In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds. If you have many domain names, the default of 500 seconds may be too long.
    required: false
    default: 500
    aliases: []
  private_zone:
    description:
      - If set to true, the private zone matching the requested name within the domain will be used if there are both public and private zones. The default is to use the public zone.
@@ -132,6 +124,13 @@ options:
    required: false
    default: null
    version_added: "2.0"
  vpc_id:
    description:
      - "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC."
      - This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs.
    required: false
    default: null
    version_added: "2.0"
author: "Bruce Pennypacker (@bpennypacker)"
extends_documentation_fragment: aws
'''
@@ -195,6 +194,28 @@ EXAMPLES = '''
      alias=True
      alias_hosted_zone_id="{{ elb_zone_id }}"

# Add an AAAA record with Hosted Zone ID. Note that because there are colons in the value
# that the entire parameter list must be quoted:
- route53:
    command: "create"
    zone: "foo.com"
    hosted_zone_id: "Z2AABBCCDDEEFF"
    record: "localhost.foo.com"
    type: "AAAA"
    ttl: "7200"
    value: "::1"

# Use a routing policy to distribute traffic:
- route53:
    command: "create"
@@ -222,14 +243,26 @@ try:
except ImportError:
    HAS_BOTO = False

def get_zone_by_name(conn, module, zone_name, want_private):
    """Finds a zone by name"""
def get_zone_by_name(conn, module, zone_name, want_private, zone_id, want_vpc_id):
    """Finds a zone by name or zone_id"""
    for zone in conn.get_zones():
        # only save this zone id if the private status of the zone matches
        # the private_zone_in boolean specified in the params
        private_zone = module.boolean(zone.config.get('PrivateZone', False))
        if private_zone == want_private and zone.name == zone_name:
            return zone
        if private_zone == want_private and ((zone.name == zone_name and zone_id is None) or zone.id.replace('/hostedzone/', '') == zone_id):
            if want_vpc_id:
                # NOTE: These details aren't available in other boto methods, hence the necessary
                # extra API call
                zone_details = conn.get_hosted_zone(zone.id)['GetHostedZoneResponse']
                # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882
                if isinstance(zone_details['VPCs'], dict):
                    if zone_details['VPCs']['VPC']['VPCId'] == want_vpc_id:
                        return zone
                else:  # Forward compatibility for when boto fixes that bug
                    if want_vpc_id in [v['VPCId'] for v in zone_details['VPCs']]:
                        return zone
            else:
                return zone
    return None
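The VPC matching above guards against the boto quirk referenced in the comment (boto PR 2882): get_hosted_zone may return 'VPCs' as a single nested dict rather than a list. That branch can be isolated as follows; the function name is illustrative:

def zone_matches_vpc(zone_details, want_vpc_id):
    # zone_details is the 'GetHostedZoneResponse' payload
    vpcs = zone_details['VPCs']
    if isinstance(vpcs, dict):
        # affected boto versions wrap a single VPC as {'VPC': {...}}
        return vpcs['VPC']['VPCId'] == want_vpc_id
    return want_vpc_id in [v['VPCId'] for v in vpcs]

assert zone_matches_vpc({'VPCs': {'VPC': {'VPCId': 'vpc-1'}}}, 'vpc-1')
assert zone_matches_vpc({'VPCs': [{'VPCId': 'vpc-1'}, {'VPCId': 'vpc-2'}]}, 'vpc-2')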
@@ -252,6 +285,7 @@ def main():
    argument_spec.update(dict(
        command = dict(choices=['get', 'create', 'delete'], required=True),
        zone = dict(required=True),
        hosted_zone_id = dict(required=False, default=None),
        record = dict(required=True),
        ttl = dict(required=False, type='int', default=3600),
        type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True),
@@ -266,6 +300,7 @@ def main():
        region = dict(required=False),
        health_check = dict(required=False),
        failover = dict(required=False),
        vpc_id = dict(required=False),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
@@ -275,6 +310,7 @@ def main():

    command_in = module.params.get('command')
    zone_in = module.params.get('zone').lower()
    hosted_zone_id_in = module.params.get('hosted_zone_id')
    ttl_in = module.params.get('ttl')
    record_in = module.params.get('record').lower()
    type_in = module.params.get('type')
@ -288,6 +324,7 @@ def main():
|
|||
region_in = module.params.get('region')
|
||||
health_check_in = module.params.get('health_check')
|
||||
failover_in = module.params.get('failover')
|
||||
vpc_id_in = module.params.get('vpc_id')
|
||||
|
||||
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
|
||||
|
||||
|
@ -314,6 +351,11 @@ def main():
|
|||
elif not alias_hosted_zone_id_in:
|
||||
module.fail_json(msg = "parameter 'alias_hosted_zone_id' required for alias create/delete")
|
||||
|
||||
if vpc_id_in and not private_zone_in:
|
||||
module.fail_json(msg="parameter 'private_zone' must be true when specifying parameter"
|
||||
" 'vpc_id'")
|
||||
|
||||
|
||||
# connect to the route53 endpoint
|
||||
try:
|
||||
conn = Route53Connection(**aws_connect_kwargs)
|
||||
|
@ -321,7 +363,7 @@ def main():
|
|||
module.fail_json(msg = e.error_message)
|
||||
|
||||
# Find the named zone ID
|
||||
zone = get_zone_by_name(conn, module, zone_in, private_zone_in)
|
||||
zone = get_zone_by_name(conn, module, zone_in, private_zone_in, hosted_zone_id_in, vpc_id_in)
|
||||
|
||||
# Verify that the requested zone is already defined in Route53
|
||||
if zone is None:
|
||||
|
@ -355,11 +397,15 @@ def main():
|
|||
record['ttl'] = rset.ttl
|
||||
record['value'] = ','.join(sorted(rset.resource_records))
|
||||
record['values'] = sorted(rset.resource_records)
|
||||
if hosted_zone_id_in:
|
||||
record['hosted_zone_id'] = hosted_zone_id_in
|
||||
record['identifier'] = rset.identifier
|
||||
record['weight'] = rset.weight
|
||||
record['region'] = rset.region
|
||||
record['failover'] = rset.failover
|
||||
record['health_check'] = rset.health_check
|
||||
if hosted_zone_id_in:
|
||||
record['hosted_zone_id'] = hosted_zone_id_in
|
||||
if rset.alias_dns_name:
|
||||
record['alias'] = True
|
||||
record['value'] = rset.alias_dns_name
|
||||
|
@ -374,7 +420,13 @@ def main():
|
|||
break
|
||||
|
||||
if command_in == 'get':
|
||||
module.exit_json(changed=False, set=record)
|
||||
if type_in == 'NS':
|
||||
ns = record['values']
|
||||
else:
|
||||
# Retrieve name servers associated to the zone.
|
||||
ns = conn.get_zone(zone_in).get_nameservers()
|
||||
|
||||
module.exit_json(changed=False, set=record, nameservers=ns)
|
||||
|
||||
if command_in == 'delete' and not found_record:
|
||||
module.exit_json(changed=False)
|
||||
|
|
|
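The reworked `get` path above now returns the zone's name servers alongside the record. A minimal playbook sketch, assuming only the behaviour visible in this hunk (the `nameservers` key comes straight from the `module.exit_json` call):

- route53:
    command: "get"
    zone: "foo.com"
    record: "foo.com"
    type: "NS"
  register: rec
- debug: msg="{{ rec.nameservers }}"
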
@ -35,7 +35,8 @@ options:
    default: null
    aliases: ['ec2_secret_key', 'secret_key']
  bucket:
    description: Bucket name.
    description:
      - Bucket name.
    required: true
    default: null
    aliases: []

@ -50,12 +51,31 @@ options:
      - When set for PUT mode, asks for server-side encryption
    required: false
    default: no
    version_added: "2.0"
  expiration:
    description:
      - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation.
    required: false
    default: 600
    aliases: []
  headers:
    description:
      - Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
    required: false
    default: null
    version_added: "2.0"
  marker:
    description:
      - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with the key after the marker.
    required: false
    default: null
    version_added: "2.0"
  max_keys:
    description:
      - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
    required: false
    default: 1000
    version_added: "2.0"
  metadata:
    description:
      - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.

@ -64,7 +84,7 @@ options:
    version_added: "1.6"
  mode:
    description:
      - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), create (bucket), delete (bucket), and delobj (delete object).
      - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys (2.0+)), create (bucket), delete (bucket), and delobj (delete object).
    required: true
    default: null
    aliases: []

@ -73,6 +93,18 @@ options:
      - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
    required: false
    default: null
  permission:
    description:
      - This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'public-read-write', 'authenticated-read'. Multiple permissions can be specified as a list.
    required: false
    default: private
    version_added: "2.0"
  prefix:
    description:
      - Limits the response to keys that begin with the specified prefix for list mode
    required: false
    default: null
    version_added: "2.0"
  version:
    description:
      - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.

@ -99,18 +131,20 @@ options:
    default: 0
    version_added: "2.0"
  s3_url:
    description: S3 URL endpoint for usage with Eucalyptus, fakes3, etc. Otherwise assumes AWS
    description:
      - S3 URL endpoint for usage with Eucalyptus, fakes3, etc. Otherwise assumes AWS
    default: null
    aliases: [ S3_URL ]
  src:
    description: The source file path when performing a PUT operation.
    description:
      - The source file path when performing a PUT operation.
    required: false
    default: null
    aliases: []
    version_added: "1.3"

requirements: [ "boto" ]
author:
author:
  - "Lester Wade (@lwade)"
  - "Ralph Tice (@ralph-tice)"
extends_documentation_fragment: aws
@ -129,8 +163,17 @@ EXAMPLES = '''
# PUT/upload with metadata
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'

# PUT/upload with custom headers
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put headers=x-amz-grant-full-control=emailAddress=owner@example.com

# List keys simple
- s3: bucket=mybucket mode=list

# List keys all options
- s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472

# Create an empty bucket
- s3: bucket=mybucket mode=create
- s3: bucket=mybucket mode=create permission=public-read

# Create a bucket with key as directory, in the EU region
- s3: bucket=mybucket object=/my/directory/path mode=create region=eu-west-1

@ -138,7 +181,7 @@ EXAMPLES = '''
# Delete a bucket and all contents
- s3: bucket=mybucket mode=delete

# GET an object but don't download if the file checksums match
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=different

# Delete an object from a bucket

@ -147,7 +190,6 @@ EXAMPLES = '''

import os
import urlparse
import hashlib
from ssl import SSLError

try:

@ -156,6 +198,7 @@ try:
    from boto.s3.connection import Location
    from boto.s3.connection import OrdinaryCallingFormat
    from boto.s3.connection import S3Connection
    from boto.s3.acl import CannedACLStrings
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False

@ -200,11 +243,26 @@ def create_bucket(module, s3, bucket, location=None):
        location = Location.DEFAULT
    try:
        bucket = s3.create_bucket(bucket, location=location)
        for acl in module.params.get('permission'):
            bucket.set_acl(acl)
    except s3.provider.storage_response_error, e:
        module.fail_json(msg=str(e))
    if bucket:
        return True

def get_bucket(module, s3, bucket):
    try:
        return s3.lookup(bucket)
    except s3.provider.storage_response_error, e:
        module.fail_json(msg=str(e))

def list_keys(module, bucket_object, prefix, marker, max_keys):
    all_keys = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys)

    keys = [x.key for x in all_keys]

    module.exit_json(msg="LIST operation complete", s3_keys=keys)

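# A rough paging sketch (not module code): list mode returns at most max_keys
# results per call, and the marker option documented above exists so callers
# can walk larger buckets. Assumes a boto bucket object as used in list_keys;
# is_truncated is set by boto on the returned result set.
#
#     keys = []
#     marker = ''
#     while True:
#         batch = bucket_object.get_all_keys(prefix='', marker=marker, max_keys=1000)
#         keys.extend(k.key for k in batch)
#         if not batch.is_truncated:
#             break
#         marker = keys[-1]
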
def delete_bucket(module, s3, bucket):
    try:
        bucket = s3.lookup(bucket)

@ -232,15 +290,6 @@ def create_dirkey(module, s3, bucket, obj):
    except s3.provider.storage_response_error, e:
        module.fail_json(msg=str(e))

def upload_file_check(src):
    if os.path.exists(src):
        file_exists is True
    else:
        file_exists is False
    if os.path.isdir(src):
        module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True)
    return file_exists

def path_check(path):
    if os.path.exists(path):
        return True

@ -248,7 +297,7 @@ def path_check(path):
        return False


def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt):
def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers):
    try:
        bucket = s3.lookup(bucket)
        key = bucket.new_key(obj)

@ -256,7 +305,9 @@ def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt):
            for meta_key in metadata.keys():
                key.set_metadata(meta_key, metadata[meta_key])

        key.set_contents_from_filename(src, encrypt_key=encrypt)
        key.set_contents_from_filename(src, encrypt_key=encrypt, headers=headers)
        for acl in module.params.get('permission'):
            key.set_acl(acl)
        url = key.generate_url(expiry)
        module.exit_json(msg="PUT operation complete", url=url, changed=True)
    except s3.provider.storage_copy_error, e:

@ -315,13 +366,6 @@ def is_walrus(s3_url):
    else:
        return False

def get_md5_digest(local_file):
    md5 = hashlib.md5()
    with open(local_file, 'rb') as f:
        for data in f.read(1024 ** 2):
            md5.update(data)
    return md5.hexdigest()


def main():
    argument_spec = ec2_argument_spec()

@ -330,11 +374,16 @@ def main():
        dest = dict(default=None),
        encrypt = dict(default=True, type='bool'),
        expiry = dict(default=600, aliases=['expiration']),
        headers = dict(type='dict'),
        marker = dict(default=None),
        max_keys = dict(default=1000),
        metadata = dict(type='dict'),
        mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj'], required=True),
        mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
        object = dict(),
        permission = dict(type='list', default=['private']),
        version = dict(default=None),
        overwrite = dict(aliases=['force'], default='always'),
        prefix = dict(default=None),
        retries = dict(aliases=['retry'], type='int', default=0),
        s3_url = dict(aliases=['S3_URL']),
        src = dict(),

@ -350,25 +399,33 @@ def main():
    expiry = int(module.params['expiry'])
    if module.params.get('dest'):
        dest = os.path.expanduser(module.params.get('dest'))
    headers = module.params.get('headers')
    marker = module.params.get('marker')
    max_keys = module.params.get('max_keys')
    metadata = module.params.get('metadata')
    mode = module.params.get('mode')
    obj = module.params.get('object')
    version = module.params.get('version')
    overwrite = module.params.get('overwrite')
    prefix = module.params.get('prefix')
    retries = module.params.get('retries')
    s3_url = module.params.get('s3_url')
    src = module.params.get('src')

    for acl in module.params.get('permission'):
        if acl not in CannedACLStrings:
            module.fail_json(msg='Unknown permission specified: %s' % str(acl))

    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

@ -388,6 +445,12 @@ def main():
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    # bucket names with .'s in them need to use the calling_format option,
    # otherwise the connection will fail. See https://github.com/boto/boto/issues/2836
    # for more details.
    if '.' in bucket:
        aws_connect_kwargs['calling_format'] = OrdinaryCallingFormat()

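    # For reference, the same workaround in a standalone boto session (sketch,
    # assuming boto is installed): path-style addressing avoids the wildcard
    # TLS certificate mismatch that dotted bucket names trigger.
    #
    #     import boto
    #     from boto.s3.connection import OrdinaryCallingFormat
    #     conn = boto.connect_s3(calling_format=OrdinaryCallingFormat())
    #     bucket = conn.lookup('my.dotted.bucket')
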
    # Look at s3_url and tweak connection settings
    # if connecting to Walrus or fakes3
    try:

@ -404,7 +467,7 @@ def main():
            walrus = urlparse.urlparse(s3_url).hostname
            s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
        else:
            s3 = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_kwargs)
            s3 = boto.s3.connect_to_region(location, is_secure=True, **aws_connect_kwargs)
            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
            if s3 is None:
                s3 = boto.connect_s3(**aws_connect_kwargs)

@ -433,16 +496,15 @@ def main():
        else:
            module.fail_json(msg="Key %s does not exist." % obj, failed=True)

        # If the destination path doesn't exist, no need to do the md5sum etag check, so just download.
        # If the destination path doesn't exist or overwrite is True, no need to do the md5sum etag check, so just download.
        pathrtn = path_check(dest)
        if pathrtn is False:
        if pathrtn is False or overwrite == 'always':
            download_s3file(module, s3, bucket, obj, dest, retries, version=version)

        # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
        if pathrtn is True:
            md5_remote = keysum(module, s3, bucket, obj, version=version)
            md5_local = get_md5_digest(dest)
            md5_local = module.md5(dest)
            if md5_local == md5_remote:
                sum_matches = True
                if overwrite == 'always':

@ -461,10 +523,6 @@ def main():
        if sum_matches is True and overwrite == 'never':
            module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)

        # At this point explicitly define the overwrite condition.
        if sum_matches is True and pathrtn is True and overwrite == 'always':
            download_s3file(module, s3, bucket, obj, dest, retries, version=version)

    # if our mode is a PUT operation (upload), go through the procedure as appropriate ...
    if mode == 'put':

@ -485,29 +543,29 @@ def main():
        # Let's check key state. Does it exist and if it does, compute the etag md5sum.
        if bucketrtn is True and keyrtn is True:
            md5_remote = keysum(module, s3, bucket, obj)
            md5_local = get_md5_digest(src)
            md5_local = module.md5(src)

            if md5_local == md5_remote:
                sum_matches = True
                if overwrite == 'always':
                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
                else:
                    get_download_url(module, s3, bucket, obj, expiry, changed=False)
            else:
                sum_matches = False
                if overwrite in ('always', 'different'):
                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
                else:
                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")

        # If neither exist (based on bucket existence), we can create both.
        if bucketrtn is False and pathrtn is True:
            create_bucket(module, s3, bucket, location)
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

        # If bucket exists but key doesn't, just upload.
        if bucketrtn is True and pathrtn is True and keyrtn is False:
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

        # Delete an object from a bucket, not the entire bucket
        if mode == 'delobj':

@ -538,6 +596,16 @@ def main():
        else:
            module.fail_json(msg="Bucket parameter is required.", failed=True)

    # Support for listing a set of keys
    if mode == 'list':
        bucket_object = get_bucket(module, s3, bucket)

        # If the bucket does not exist then bail out
        if bucket_object is None:
            module.fail_json(msg="Target bucket (%s) cannot be found" % bucket, failed=True)

        list_keys(module, bucket_object, prefix, marker, max_keys)

    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
    if mode == 'create':

@ -567,8 +567,8 @@ def main():
            module.fail_json(msg='location parameter is required for new instance')
        if not module.params.get('storage_account'):
            module.fail_json(msg='storage_account parameter is required for new instance')
        if not module.params.get('password'):
            module.fail_json(msg='password parameter is required for new instance')
        if not (module.params.get('password') or module.params.get('ssh_cert_path')):
            module.fail_json(msg='password or ssh_cert_path parameter is required for new instance')
        (changed, public_dns_name, deployment) = create_virtual_machine(module, azure)

        module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__)))

@ -109,6 +114,14 @@ options:
      - none
      - syslog
    version_added: "2.0"
  log_opt:
    description:
      - Additional options to pass to the logging driver selected above. See Docker log-driver
        documentation for more information (https://docs.docker.com/reference/logging/overview/).
        Requires docker >=1.7.0.
    required: false
    default: null
    version_added: "2.0"
  memory_limit:
    description:
      - RAM allocated to the container as a number of bytes or as a human-readable

@ -160,6 +168,12 @@ options:
        specified by docker-py.
    default: docker-py default remote API version
    version_added: "1.8"
  docker_user:
    description:
      - Username or UID to use within the container
    required: false
    default: null
    version_added: "2.0"
  username:
    description:
      - Remote API username.

@ -191,8 +205,16 @@ options:
    default: null
  detach:
    description:
      - Enable detached mode to leave the container running in background.
      - Enable detached mode to leave the container running in background. If
        disabled, fail unless the process exits cleanly.
    default: true
  signal:
    version_added: "2.0"
    description:
      - With the state "killed", you can alter the signal sent to the
        container.
    required: false
    default: KILL
  state:
    description:
      - Assert the container's desired state. "present" only asserts that the

@ -251,6 +273,12 @@ options:
    default: DockerHub
    aliases: []
    version_added: "1.8"
  read_only:
    description:
      - Mount the container's root filesystem as read only
    default: null
    aliases: []
    version_added: "2.0"
  restart_policy:
    description:
      - Container restart policy.

@ -264,6 +292,7 @@ options:
    default: 0
    version_added: "1.9"
  extra_hosts:
    version_added: "2.0"
    description:
      - Dict of custom host-to-IP mappings to be defined in the container
  insecure_registry:

@ -272,8 +301,26 @@ options:
        docker-py >= 0.5.0.
    default: false
    version_added: "1.9"

author:
  cpu_set:
    description:
      - CPUs in which to allow execution. Requires docker-py >= 0.6.0.
    required: false
    default: null
    version_added: "2.0"
  cap_add:
    description:
      - Add capabilities for the container. Requires docker-py >= 0.5.0.
    required: false
    default: false
    version_added: "2.0"
  cap_drop:
    description:
      - Drop capabilities for the container. Requires docker-py >= 0.5.0.
    required: false
    default: false
    aliases: []
    version_added: "2.0"
author:
  - "Cove Schneider (@cove)"
  - "Joshua Conner (@joshuaconner)"
  - "Pavel Antonov (@softzilla)"
@ -376,9 +423,23 @@ EXAMPLES = '''
    name: ohno
    image: someuser/oldandbusted
    state: absent

# Example Syslogging Output

- name: myservice container
  docker:
    name: myservice
    image: someservice/someimage
    state: reloaded
    log_driver: syslog
    log_opt:
      syslog-address: tcp://my-syslog-server:514
      syslog-facility: daemon
      syslog-tag: myservice
'''

HAS_DOCKER_PY = True
DEFAULT_DOCKER_API_VERSION = None

import sys
import json

@ -388,6 +449,7 @@ from urlparse import urlparse
try:
    import docker.client
    import docker.utils
    import docker.errors
    from requests.exceptions import RequestException
except ImportError:
    HAS_DOCKER_PY = False

@ -480,6 +542,7 @@ def get_docker_py_versioninfo():
            if not char.isdigit():
                nondigit = part[idx:]
                digit = part[:idx]
                break
        if digit:
            version.append(int(digit))
        if nondigit:

@ -528,6 +591,12 @@ class DockerManager(object):
        'extra_hosts': ((0, 7, 0), '1.3.1'),
        'pid': ((1, 0, 0), '1.17'),
        'log_driver': ((1, 2, 0), '1.18'),
        'log_opt': ((1, 2, 0), '1.18'),
        'host_config': ((0, 7, 0), '1.15'),
        'cpu_set': ((0, 6, 0), '1.14'),
        'cap_add': ((0, 5, 0), '1.14'),
        'cap_drop': ((0, 5, 0), '1.14'),
        'read_only': ((1, 0, 0), '1.17'),
        # Clientside only
        'insecure_registry': ((0, 5, 0), '0.0')
        }

@ -539,24 +608,26 @@ class DockerManager(object):
        self.volumes = None
        if self.module.params.get('volumes'):
            self.binds = {}
            self.volumes = {}
            self.volumes = []
            vols = self.module.params.get('volumes')
            for vol in vols:
                parts = vol.split(":")
                # regular volume
                if len(parts) == 1:
                    self.volumes.append(parts[0])
                # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container)
                if len(parts) == 2:
                    self.volumes[parts[1]] = {}
                    self.binds[parts[0]] = parts[1]
                # with bind mode
                elif len(parts) == 3:
                    if parts[2] not in ['ro', 'rw']:
                        self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"')
                    ro = parts[2] == 'ro'
                    self.volumes[parts[1]] = {}
                    self.binds[parts[0]] = {'bind': parts[1], 'ro': ro}
                # docker mount (e.g. /www, mounts a docker volume /www on the container at the same location)
                elif 2 <= len(parts) <= 3:
                    # default to read-write
                    ro = False
                    # with supplied bind mode
                    if len(parts) == 3:
                        if parts[2] not in ['ro', 'rw']:
                            self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"')
                        else:
                            ro = parts[2] == 'ro'
                    self.binds[parts[0]] = {'bind': parts[1], 'ro': ro}
                else:
                    self.volumes[parts[0]] = {}
                    self.module.fail_json(msg='volumes support 1 to 3 arguments')

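        # For reference, the volume string forms accepted above (host paths
        # hypothetical):
        #   "/www"              -> container-only volume
        #   "/mnt/www:/www"     -> host bind mount, read-write by default
        #   "/mnt/www:/www:ro"  -> host bind mount with an explicit bind mode
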
        self.lxc_conf = None
        if self.module.params.get('lxc_conf'):

@ -735,6 +806,82 @@ class DockerManager(object):
        else:
            return None

    def get_start_params(self):
        """
        Create start params
        """
        params = {
            'lxc_conf': self.lxc_conf,
            'binds': self.binds,
            'port_bindings': self.port_bindings,
            'publish_all_ports': self.module.params.get('publish_all_ports'),
            'privileged': self.module.params.get('privileged'),
            'links': self.links,
            'network_mode': self.module.params.get('net'),
        }

        optionals = {}
        for optional_param in ('dns', 'volumes_from', 'restart_policy',
                'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver',
                'cap_add', 'cap_drop', 'read_only', 'log_opt'):
            optionals[optional_param] = self.module.params.get(optional_param)

        if optionals['dns'] is not None:
            self.ensure_capability('dns')
            params['dns'] = optionals['dns']

        if optionals['volumes_from'] is not None:
            self.ensure_capability('volumes_from')
            params['volumes_from'] = optionals['volumes_from']

        if optionals['restart_policy'] is not None:
            self.ensure_capability('restart_policy')
            params['restart_policy'] = {'Name': optionals['restart_policy']}
            if params['restart_policy']['Name'] == 'on-failure':
                params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry']

        # docker_py only accepts 'host' or None
        if 'pid' in optionals and not optionals['pid']:
            optionals['pid'] = None

        if optionals['pid'] is not None:
            self.ensure_capability('pid')
            params['pid_mode'] = optionals['pid']

        if optionals['extra_hosts'] is not None:
            self.ensure_capability('extra_hosts')
            params['extra_hosts'] = optionals['extra_hosts']

        if optionals['log_driver'] is not None:
            self.ensure_capability('log_driver')
            log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON)
            if optionals['log_opt'] is not None:
                for k, v in optionals['log_opt'].iteritems():
                    log_config.set_config_value(k, v)
            log_config.type = optionals['log_driver']
            params['log_config'] = log_config

        if optionals['cap_add'] is not None:
            self.ensure_capability('cap_add')
            params['cap_add'] = optionals['cap_add']

        if optionals['cap_drop'] is not None:
            self.ensure_capability('cap_drop')
            params['cap_drop'] = optionals['cap_drop']

        if optionals['read_only'] is not None:
            self.ensure_capability('read_only')
            params['read_only'] = optionals['read_only']

        return params

    def create_host_config(self):
        """
        Create HostConfig object
        """
        params = self.get_start_params()
        return docker.utils.create_host_config(**params)

    def get_port_bindings(self, ports):
        """
        Parse the `ports` string into a port bindings dict for the `start_container` call.

@ -871,6 +1018,9 @@ class DockerManager(object):
        running = self.get_running_containers()
        current = self.get_inspect_containers(running)

        # Get API version
        api_version = self.client.version()['ApiVersion']

        image = self.get_inspect_image()
        if image is None:
            # The image isn't present. Assume that we're about to pull a new

@ -921,7 +1071,7 @@ class DockerManager(object):

            expected_volume_keys = set((image['ContainerConfig']['Volumes'] or {}).keys())
            if self.volumes:
                expected_volume_keys.update(self.volumes.keys())
                expected_volume_keys.update(self.volumes)

            actual_volume_keys = set((container['Config']['Volumes'] or {}).keys())

@ -937,7 +1087,11 @@ class DockerManager(object):
            except ValueError as e:
                self.module.fail_json(msg=str(e))

            actual_mem = container['Config']['Memory']
            # For the v1.19 API and above use HostConfig, otherwise use Config.
            # ApiVersion is reported as a string, so compare numerically.
            if float(api_version) >= 1.19:
                actual_mem = container['HostConfig']['Memory']
            else:
                actual_mem = container['Config']['Memory']

            if expected_mem and actual_mem != expected_mem:
                self.reload_reasons.append('memory ({0} => {1})'.format(actual_mem, expected_mem))

@ -1063,15 +1217,14 @@ class DockerManager(object):
            for container_port, config in self.port_bindings.iteritems():
                if isinstance(container_port, int):
                    container_port = "{0}/tcp".format(container_port)
                bind = {}
                if len(config) == 1:
                    bind['HostIp'] = "0.0.0.0"
                    bind['HostPort'] = ""
                    expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}]
                elif isinstance(config[0], tuple):
                    expected_bound_ports[container_port] = []
                    for hostip, hostport in config:
                        expected_bound_ports[container_port].append({'HostIp': hostip, 'HostPort': str(hostport)})
                else:
                    bind['HostIp'] = config[0]
                    bind['HostPort'] = str(config[1])

                    expected_bound_ports[container_port] = [bind]
                    expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}]

            actual_bound_ports = container['HostConfig']['PortBindings'] or {}

@ -1108,8 +1261,8 @@ class DockerManager(object):

            # NETWORK MODE

            expected_netmode = self.module.params.get('net') or ''
            actual_netmode = container['HostConfig']['NetworkMode']
            expected_netmode = self.module.params.get('net') or 'bridge'
            actual_netmode = container['HostConfig']['NetworkMode'] or 'bridge'
            if actual_netmode != expected_netmode:
                self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode))
                differing.append(container)

@ -1134,7 +1287,7 @@ class DockerManager(object):

            # LOG_DRIVER

            if self.ensure_capability('log_driver', False) :
            if self.ensure_capability('log_driver', False):
                expected_log_driver = self.module.params.get('log_driver') or 'json-file'
                actual_log_driver = container['HostConfig']['LogConfig']['Type']
                if actual_log_driver != expected_log_driver:

@ -1142,6 +1295,17 @@ class DockerManager(object):
                    differing.append(container)
                    continue

            if self.ensure_capability('log_opt', False):
                expected_logging_opts = self.module.params.get('log_opt') or {}
                actual_log_opts = container['HostConfig']['LogConfig']['Config']
                if len(set(expected_logging_opts.items()) - set(actual_log_opts.items())) != 0:
                    log_opt_reasons = {
                        'added': dict(set(expected_logging_opts.items()) - set(actual_log_opts.items())),
                        'removed': dict(set(actual_log_opts.items()) - set(expected_logging_opts.items()))
                    }
                    self.reload_reasons.append('log_opt ({0})'.format(log_opt_reasons))
                    differing.append(container)

        return differing

    def get_deployed_containers(self):

@ -1238,63 +1402,17 @@ class DockerManager(object):
        except Exception as e:
            self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e))

    def create_host_config(self):
        params = {
            'lxc_conf': self.lxc_conf,
            'binds': self.binds,
            'port_bindings': self.port_bindings,
            'publish_all_ports': self.module.params.get('publish_all_ports'),
            'privileged': self.module.params.get('privileged'),
            'links': self.links,
            'network_mode': self.module.params.get('net'),
        }

        optionals = {}
        for optional_param in ('dns', 'volumes_from', 'restart_policy',
                'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver'):
            optionals[optional_param] = self.module.params.get(optional_param)

        if optionals['dns'] is not None:
            self.ensure_capability('dns')
            params['dns'] = optionals['dns']

        if optionals['volumes_from'] is not None:
            self.ensure_capability('volumes_from')
            params['volumes_from'] = optionals['volumes_from']

        if optionals['restart_policy'] is not None:
            self.ensure_capability('restart_policy')
            params['restart_policy'] = {'Name': optionals['restart_policy']}
            if params['restart_policy']['Name'] == 'on-failure':
                params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry']

        if optionals['pid'] is not None:
            self.ensure_capability('pid')
            params['pid_mode'] = optionals['pid']

        if optionals['extra_hosts'] is not None:
            self.ensure_capability('extra_hosts')
            params['extra_hosts'] = optionals['extra_hosts']

        if optionals['log_driver'] is not None:
            self.ensure_capability('log_driver')
            log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON)
            log_config.type = optionals['log_driver']
            params['log_config'] = log_config

        return docker.utils.create_host_config(**params)

    def create_containers(self, count=1):
        try:
            mem_limit = _human_to_bytes(self.module.params.get('memory_limit'))
        except ValueError as e:
            self.module.fail_json(msg=str(e))
        api_version = self.client.version()['ApiVersion']

        params = {'image': self.module.params.get('image'),
                  'command': self.module.params.get('command'),
                  'ports': self.exposed_ports,
                  'volumes': self.volumes,
                  'mem_limit': mem_limit,
                  'environment': self.env,
                  'hostname': self.module.params.get('hostname'),
                  'domainname': self.module.params.get('domainname'),

@ -1302,8 +1420,18 @@ class DockerManager(object):
                  'name': self.module.params.get('name'),
                  'stdin_open': self.module.params.get('stdin_open'),
                  'tty': self.module.params.get('tty'),
                  'host_config': self.create_host_config(),
                  'cpuset': self.module.params.get('cpu_set'),
                  'user': self.module.params.get('docker_user'),
                  }
        if self.ensure_capability('host_config', fail=False):
            params['host_config'] = self.create_host_config()

        # For the v1.19 API and above use HostConfig, otherwise use Config.
        # ApiVersion is reported as a string, so compare numerically.
        if float(api_version) < 1.19:
            params['mem_limit'] = mem_limit
        else:
            params['host_config']['Memory'] = mem_limit


        def do_create(count, params):
            results = []

@ -1316,17 +1444,32 @@ class DockerManager(object):

        try:
            containers = do_create(count, params)
        except:
        except docker.errors.APIError as e:
            if e.response.status_code != 404:
                raise

            self.pull_image()
            containers = do_create(count, params)

        return containers

    def start_containers(self, containers):
        params = {}

        if not self.ensure_capability('host_config', fail=False):
            params = self.get_start_params()

        for i in containers:
            self.client.start(i)
            self.increment_counter('started')

            if not self.module.params.get('detach'):
                status = self.client.wait(i['Id'])
                if status != 0:
                    output = self.client.logs(i['Id'], stdout=True, stderr=True,
                                              stream=False, timestamps=False)
                    self.module.fail_json(status=status, msg=output)

    def stop_containers(self, containers):
        for i in containers:
            self.client.stop(i['Id'])

@ -1341,7 +1484,7 @@ class DockerManager(object):

    def kill_containers(self, containers):
        for i in containers:
            self.client.kill(i['Id'])
            self.client.kill(i['Id'], self.module.params.get('signal'))
            self.increment_counter('killed')

    def restart_containers(self, containers):

@ -1495,6 +1638,7 @@ def main():
            tls_ca_cert = dict(required=False, default=None, type='str'),
            tls_hostname = dict(required=False, type='str', default=None),
            docker_api_version = dict(required=False, default=DEFAULT_DOCKER_API_VERSION, type='str'),
            docker_user = dict(default=None),
            username = dict(default=None),
            password = dict(),
            email = dict(),

@ -1505,6 +1649,7 @@ def main():
            dns = dict(),
            detach = dict(default=True, type='bool'),
            state = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent', 'running']),
            signal = dict(default=None),
            restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']),
            restart_policy_retry = dict(default=0, type='int'),
            extra_hosts = dict(type='dict'),

@ -1518,6 +1663,11 @@ def main():
            pid = dict(default=None),
            insecure_registry = dict(default=False, type='bool'),
            log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']),
            log_opt = dict(default=None, type='dict'),
            cpu_set = dict(default=None),
            cap_add = dict(default=None, type='list'),
            cap_drop = dict(default=None, type='list'),
            read_only = dict(default=None, type='bool'),
        ),
        required_together = (
            ['tls_client_cert', 'tls_client_key'],

@ -1543,10 +1693,14 @@ def main():
    if count > 1 and name:
        module.fail_json(msg="Count and name must not be used together")

    # Explicitly pull new container images, if requested.
    # Do this before noticing running and deployed containers so that the image names will differ
    # if a newer image has been pulled.
    if pull == "always":
    # Explicitly pull new container images, if requested. Do this before
    # noticing running and deployed containers so that the image names
    # will differ if a newer image has been pulled.
    # Missing images should be pulled first to avoid downtime when the old
    # container is stopped, but the image for the new one is not downloaded yet.
    # It also prevents removal of a running container before realizing
    # that the requested image cannot be retrieved.
    if pull == "always" or (state == 'reloaded' and manager.get_inspect_image() is None):
        manager.pull_image()

    containers = ContainerSet(manager)

@ -1575,7 +1729,7 @@ def main():
                summary=manager.counters,
                containers=containers.changed,
                reload_reasons=manager.get_reload_reason_message(),
                ansible_facts=_ansible_facts(containers.changed))
                ansible_facts=_ansible_facts(manager.get_inspect_containers(containers.changed)))

    except DockerAPIError as e:
        module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation)

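Two playbook sketches for behaviour added in this file, assuming only options visible in the diff (image names are hypothetical): `detach: false` makes the task fail unless the container exits cleanly, and `signal` changes what `state: killed` sends.

- docker:
    name: migrate
    image: myapp/migrations
    state: started
    detach: false

- docker:
    name: myservice
    image: someservice/someimage
    state: killed
    signal: SIGTERM
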
@ -137,6 +137,7 @@ try:
except ImportError:
    HAS_DOCKER_CLIENT = False

DEFAULT_DOCKER_API_VERSION = None
if HAS_DOCKER_CLIENT:
    try:
        from docker.errors import APIError as DockerAPIError

@ -53,7 +53,7 @@ options:
    required: false
    default: private
  headers:
    version_added: 2.0
    version_added: "2.0"
    description:
      - Headers to attach to object.
    required: false

@ -211,15 +211,6 @@ def create_dirkey(module, gs, bucket, obj):
    except gs.provider.storage_response_error, e:
        module.fail_json(msg=str(e))

def upload_file_check(src):
    if os.path.exists(src):
        file_exists is True
    else:
        file_exists is False
    if os.path.isdir(src):
        module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True)
    return file_exists

def path_check(path):
    if os.path.exists(path):
        return True

@ -284,7 +275,7 @@ def get_download_url(module, gs, bucket, obj, expiry):

def handle_get(module, gs, bucket, obj, overwrite, dest):
    md5_remote = keysum(module, gs, bucket, obj)
    md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest()
    md5_local = module.md5(dest)
    if md5_local == md5_remote:
        module.exit_json(changed=False)
    if md5_local != md5_remote and not overwrite:

@ -300,7 +291,7 @@ def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
    # Let's check key state. Does it exist and if it does, compute the etag md5sum.
    if bucket_rc and key_rc:
        md5_remote = keysum(module, gs, bucket, obj)
        md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest()
        md5_local = module.md5(src)
        if md5_local == md5_remote:
            module.exit_json(msg="Local and remote object are identical", changed=False)
        if md5_local != md5_remote and not overwrite:

@ -32,77 +32,65 @@ options:
      - image string to use for the instance
    required: false
    default: "debian-7"
    aliases: []
  instance_names:
    description:
      - a comma-separated list of instance names to create or destroy
    required: false
    default: null
    aliases: []
  machine_type:
    description:
      - machine type to use for the instance; 'n1-standard-1' is used by default
    required: false
    default: "n1-standard-1"
    aliases: []
  metadata:
    description:
      - a hash/dictionary of custom data for the instance; '{"key":"value",...}'
    required: false
    default: null
    aliases: []
  service_account_email:
    version_added: 1.5.1
    version_added: "1.5.1"
    description:
      - service account email
    required: false
    default: null
    aliases: []
  service_account_permissions:
    version_added: 2.0
    version_added: "2.0"
    description:
      - service account permissions (see U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), --scopes section for detailed information)
    required: false
    default: null
    aliases: []
    choices: ["bigquery", "cloud-platform", "compute-ro", "compute-rw", "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", "storage-rw", "taskqueue", "userinfo-email"]
  pem_file:
    version_added: 1.5.1
    version_added: "1.5.1"
    description:
      - path to the pem file associated with the service account email
    required: false
    default: null
    aliases: []
  project_id:
    version_added: 1.5.1
    version_added: "1.5.1"
    description:
      - your GCE project ID
    required: false
    default: null
    aliases: []
  name:
    description:
      - identifier when working with a single instance
    required: false
    aliases: []
  network:
    description:
      - name of the network, 'default' will be used if not specified
    required: false
    default: "default"
    aliases: []
  persistent_boot_disk:
    description:
      - if set, create the instance with a persistent boot disk
    required: false
    default: "false"
    aliases: []
  disks:
    description:
      - a list of persistent disks to attach to the instance; a string value gives the name of the disk; alternatively, a dictionary value can define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry will be the boot disk (which must be READ_WRITE).
    required: false
    default: null
    aliases: []
    version_added: "1.7"
  state:
    description:

@ -110,40 +98,34 @@ options:
    required: false
    default: "present"
    choices: ["active", "present", "absent", "deleted"]
    aliases: []
  tags:
    description:
      - a comma-separated list of tags to associate with the instance
    required: false
    default: null
    aliases: []
  zone:
    description:
      - the GCE zone to use
    required: true
    default: "us-central1-a"
    aliases: []
  ip_forward:
    version_added: "1.9"
    description:
      - set to true if the instance can forward ip packets (useful for gateways)
    required: false
    default: "false"
    aliases: []
  external_ip:
    version_added: "1.9"
    description:
      - type of external ip, ephemeral by default
    required: false
    default: "ephemeral"
    aliases: []
  disk_auto_delete:
    version_added: "1.9"
    description:
      - if set, the boot disk will be removed after instance destruction
    required: false
    default: "true"
    aliases: []

requirements:
  - "python >= 2.6"

@ -327,7 +309,7 @@ def create_instances(module, gce, instance_names):
    # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
    if metadata:
        try:
            md = literal_eval(metadata)
            md = literal_eval(str(metadata))
            if not isinstance(md, dict):
                raise ValueError('metadata must be a dict')
        except ValueError, e:

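The str() wrapper added above matters because the metadata parameter may already arrive as a parsed dict rather than a string, and literal_eval only accepts strings. A standalone sketch of the round-trip (not module code):

    from ast import literal_eval

    metadata = {"env": "prod"}        # may be a dict or its string form
    md = literal_eval(str(metadata))  # str() makes both cases parseable
    assert isinstance(md, dict)
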
@ -75,7 +75,7 @@ options:
    aliases: []
  state:
    description:
      - desired state of the persistent disk
      - desired state of the network or firewall
    required: false
    default: "present"
    choices: ["active", "present", "absent", "deleted"]

@ -264,7 +264,7 @@ def main():
            if fw:
                gce.ex_destroy_firewall(fw)
                changed = True
        if name:
        elif name:
            json_output['name'] = name
            network = None
            try:

@ -32,6 +32,7 @@ version_added: "1.2"
author:
  - "Benno Joy (@bennojoy)"
  - "Michael DeHaan"
deprecated: Deprecated in 2.0. Use os_keypair instead
short_description: Add/Delete key pair from nova
description:
  - Add or Remove key pair from nova.

@ -36,6 +36,7 @@ version_added: "1.2"
author:
  - "Benno Joy (@bennojoy)"
  - "Brad P. Crochet (@bcrochet)"
deprecated: Deprecated in 2.0. Use os_floating_ip instead
short_description: Add/Remove floating IP from an instance
description:
  - Add or Remove a floating IP to an instance

@ -33,6 +33,7 @@ DOCUMENTATION = '''
module: quantum_floating_ip_associate
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_floating_ip instead
short_description: Associate or disassociate a particular floating IP with an instance
description:
  - Associates or disassociates a specific floating IP with a particular instance

@ -1,5 +1,19 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Based on Jimmy Tang's implementation

@ -238,8 +252,20 @@ def ensure_user_exists(keystone, user_name, password, email, tenant_name,
                                email=email, tenant_id=tenant.id)
    return (True, user.id)

def ensure_role_exists(keystone, role_name):
    # Get the role if it exists
    try:
        role = get_role(keystone, role_name)
        # Role does exist, we're done
        return (False, role.id)
    except KeyError:
        # Role doesn't exist yet
        pass

    role = keystone.roles.create(role_name)
    return (True, role.id)

def ensure_role_exists(keystone, user_name, tenant_name, role_name,
def ensure_user_role_exists(keystone, user_name, tenant_name, role_name,
                            check_mode):
    """ Check if role exists

@ -283,9 +309,11 @@ def ensure_user_absent(keystone, user, check_mode):
    raise NotImplementedError("Not yet implemented")


def ensure_role_absent(keystone, user, tenant, role, check_mode):
def ensure_user_role_absent(keystone, user, tenant, role, check_mode):
    raise NotImplementedError("Not yet implemented")

def ensure_role_absent(keystone, role_name):
    raise NotImplementedError("Not yet implemented")

def main():

@ -364,14 +392,18 @@ def dispatch(keystone, user=None, password=None, tenant=None,
    X                     absent   ensure_tenant_absent
    X        X            present  ensure_user_exists
    X        X            absent   ensure_user_absent
    X        X      X     present  ensure_role_exists
    X        X      X     absent   ensure_role_absent


    X        X      X     present  ensure_user_role_exists
    X        X      X     absent   ensure_user_role_absent
                    X     present  ensure_role_exists
                    X     absent   ensure_role_absent
    """
    changed = False
    id = None
    if tenant and not user and not role and state == "present":
    if not tenant and not user and role and state == "present":
        changed, id = ensure_role_exists(keystone, role)
    elif not tenant and not user and role and state == "absent":
        changed = ensure_role_absent(keystone, role)
    elif tenant and not user and not role and state == "present":
        changed, id = ensure_tenant_exists(keystone, tenant,
                                           tenant_description, check_mode)
    elif tenant and not user and not role and state == "absent":

@ -382,10 +414,10 @@ def dispatch(keystone, user=None, password=None, tenant=None,
    elif tenant and user and not role and state == "absent":
        changed = ensure_user_absent(keystone, user, check_mode)
    elif tenant and user and role and state == "present":
        changed, id = ensure_role_exists(keystone, user, tenant, role,
        changed, id = ensure_user_role_exists(keystone, user, tenant, role,
                                              check_mode)
    elif tenant and user and role and state == "absent":
        changed = ensure_role_absent(keystone, user, tenant, role, check_mode)
        changed = ensure_user_role_absent(keystone, user, tenant, role, check_mode)
    else:
        # Should never reach here
        raise ValueError("Code should never reach here")

@ -25,28 +25,45 @@ short_description: Get OpenStack Client config
description:
  - Get I(openstack) client config data from clouds.yaml or environment
version_added: "2.0"
notes:
  - Facts are placed in the C(openstack.clouds) variable.
options:
  clouds:
    description:
      - List of clouds to limit the return list to. No value means return
        information on all configured clouds
    required: false
    default: []
requirements: [ os-client-config ]
author: "Monty Taylor (@emonty)"
'''

EXAMPLES = '''
# Get list of clouds that do not support security groups
- os-client-config:
- os_client_config:
- debug: var={{ item }}
  with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}"

# Get information about just the mordred cloud
- os_client_config:
    clouds:
      - mordred
'''


def main():
    module = AnsibleModule({})
    module = AnsibleModule(argument_spec=dict(
        clouds=dict(required=False, default=[]),
    ))
    p = module.params

    try:
        config = os_client_config.OpenStackConfig()
        clouds = []
        for cloud in config.get_all_clouds():
            cloud.config['name'] = cloud.name
            clouds.append(cloud.config)
            if not p['clouds'] or cloud.name in p['clouds']:
                cloud.config['name'] = cloud.name
                clouds.append(cloud.config)
        module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds)))
    except exceptions.OpenStackConfigException as e:
        module.fail_json(msg=str(e))

cloud/openstack/os_floating_ip.py (new file, 198 lines)

@ -0,0 +1,198 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Author: Davide Guerri <davide.guerri@hp.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.

try:
    import shade
    from shade import meta

    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False

DOCUMENTATION = '''
---
module: os_floating_ip
version_added: "2.0"
short_description: Add/Remove floating IP from an instance
extends_documentation_fragment: openstack
description:
  - Add or Remove a floating IP to an instance
options:
  server:
    description:
      - The name or ID of the instance to which the IP address
        should be assigned.
    required: true
  network:
    description:
      - The name or ID of a neutron external network or a nova pool name.
    required: false
  floating_ip_address:
    description:
      - A floating IP address to attach or to detach. Required only if state
        is absent. When state is present it can be used to specify an IP
        address to attach.
    required: false
  reuse:
    description:
      - When state is present, and floating_ip_address is not present,
        this parameter can be used to specify whether we should try to reuse
        a floating IP address already allocated to the project.
    required: false
    default: false
  fixed_address:
    description:
      - The fixed IP of the server that the floating IP address should be
        attached to.
    required: false
  wait:
    description:
      - When attaching a floating IP address, specify whether we should
        wait for it to appear as attached.
    required: false
    default: false
  timeout:
    description:
      - Time to wait for an IP address to appear as attached. See wait.
    required: false
    default: 60
  state:
    description:
      - Should the resource be present or absent.
    choices: [present, absent]
    required: false
    default: present
requirements: ["shade"]
'''

EXAMPLES = '''
|
||||
# Assign a floating IP to the fist interface of `cattle001` from an exiting
|
||||
# external network or nova pool. A new floating IP from the first available
|
||||
# external network is allocated to the project.
|
||||
- os_floating_ip:
|
||||
cloud: dguerri
|
||||
server: cattle001
|
||||
|
||||
# Assign a new floating IP to the instance fixed ip `192.0.2.3` of
|
||||
# `cattle001`. If a free floating IP is already allocated to the project, it is
|
||||
# reused; if not, a new one is created.
|
||||
- os_floating_ip:
|
||||
cloud: dguerri
|
||||
state: present
|
||||
reuse: yes
|
||||
server: cattle001
|
||||
network: ext_net
|
||||
fixed_address: 192.0.2.3
|
||||
wait: true
|
||||
timeout: 180
|
||||
|
||||
# Detach a floating IP address from a server
|
||||
- os_floating_ip:
|
||||
cloud: dguerri
|
||||
state: absent
|
||||
floating_ip_address: 203.0.113.2
|
||||
server: cattle001
|
||||
'''
|
||||
|
||||
|
||||
def _get_floating_ip(cloud, floating_ip_address):
|
||||
f_ips = cloud.search_floating_ips(
|
||||
filters={'floating_ip_address': floating_ip_address})
|
||||
if not f_ips:
|
||||
return None
|
||||
|
||||
return f_ips[0]
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = openstack_full_argument_spec(
|
||||
server=dict(required=True),
|
||||
state=dict(default='present', choices=['absent', 'present']),
|
||||
network=dict(required=False),
|
||||
floating_ip_address=dict(required=False),
|
||||
reuse=dict(required=False, type='bool', default=False),
|
||||
fixed_address=dict(required=False),
|
||||
wait=dict(required=False, type='bool', default=False),
|
||||
timeout=dict(required=False, type='int', default=60),
|
||||
)
|
||||
|
||||
module_kwargs = openstack_module_kwargs()
|
||||
module = AnsibleModule(argument_spec, **module_kwargs)
|
||||
|
||||
if not HAS_SHADE:
|
||||
module.fail_json(msg='shade is required for this module')
|
||||
|
||||
server_name_or_id = module.params['server']
|
||||
state = module.params['state']
|
||||
network = module.params['network']
|
||||
floating_ip_address = module.params['floating_ip_address']
|
||||
reuse = module.params['reuse']
|
||||
fixed_address = module.params['fixed_address']
|
||||
wait = module.params['wait']
|
||||
timeout = module.params['timeout']
|
||||
|
||||
cloud = shade.openstack_cloud(**module.params)
|
||||
|
||||
try:
|
||||
server = cloud.get_server(server_name_or_id)
|
||||
if server is None:
|
||||
module.fail_json(
|
||||
msg="server {0} not found".format(server_name_or_id))
|
||||
|
||||
if state == 'present':
|
||||
if floating_ip_address is None:
|
||||
if reuse:
|
||||
f_ip = cloud.available_floating_ip(network=network)
|
||||
else:
|
||||
f_ip = cloud.create_floating_ip(network=network)
|
||||
else:
|
||||
f_ip = _get_floating_ip(cloud, floating_ip_address)
|
||||
if f_ip is None:
|
||||
module.fail_json(
|
||||
msg="floating IP {0} not found".format(
|
||||
floating_ip_address))
|
||||
|
||||
cloud.attach_ip_to_server(
|
||||
server_id=server['id'], floating_ip_id=f_ip['id'],
|
||||
fixed_address=fixed_address, wait=wait, timeout=timeout)
|
||||
# Update the floating IP status
|
||||
f_ip = cloud.get_floating_ip(id=f_ip['id'])
|
||||
module.exit_json(changed=True, floating_ip=f_ip)
|
||||
|
||||
elif state == 'absent':
|
||||
if floating_ip_address is None:
|
||||
module.fail_json(msg="floating_ip_address is required")
|
||||
|
||||
f_ip = _get_floating_ip(cloud, floating_ip_address)
|
||||
|
||||
cloud.detach_ip_from_server(
|
||||
server_id=server['id'], floating_ip_id=f_ip['id'])
|
||||
# Update the floating IP status
|
||||
f_ip = cloud.get_floating_ip(id=f_ip['id'])
|
||||
module.exit_json(changed=True, floating_ip=f_ip)
|
||||
|
||||
except shade.OpenStackCloudException as e:
|
||||
module.fail_json(msg=e.message, extra_data=e.extra_data)
|
||||
|
||||
|
||||
# this is magic, see lib/ansible/module_common.py
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.openstack import *
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
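A minimal sketch (not part of this commit) of the lookup helper's None-vs-first-match behavior; StubCloud is a hypothetical stand-in for a shade cloud object:

# Hedged sketch: search_floating_ips() returns a (possibly empty) list, so the
# helper normalizes "no match" to None and otherwise returns the first hit.
class StubCloud(object):
    def search_floating_ips(self, filters):
        known = [{'id': 'f1', 'floating_ip_address': '203.0.113.2'}]
        return [f for f in known
                if f['floating_ip_address'] == filters['floating_ip_address']]

def _get_floating_ip(cloud, floating_ip_address):
    f_ips = cloud.search_floating_ips(
        filters={'floating_ip_address': floating_ip_address})
    if not f_ips:
        return None
    return f_ips[0]

print(_get_floating_ip(StubCloud(), '203.0.113.2'))   # {'id': 'f1', ...}
print(_get_floating_ip(StubCloud(), '198.51.100.9'))  # None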
167 cloud/openstack/os_keypair.py Normal file
@@ -0,0 +1,167 @@
#!/usr/bin/python

# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
# Copyright (c) 2013, John Dewey <john@dewey.ws>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.


try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False


DOCUMENTATION = '''
---
module: os_keypair
short_description: Add/Delete a keypair from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
description:
   - Add or Remove key pair from OpenStack
options:
   name:
     description:
        - Name that has to be given to the key pair
     required: true
     default: None
   public_key:
     description:
        - The public key that would be uploaded to nova and injected into VMs
          upon creation.
     required: false
     default: None
   public_key_file:
     description:
        - Path to local file containing ssh public key. Mutually exclusive
          with public_key.
     required: false
     default: None
   state:
     description:
       - Should the resource be present or absent.
     choices: [present, absent]
     default: present
requirements: []
'''

EXAMPLES = '''
# Creates a key pair with the running user's public key
- os_keypair:
    cloud: mordred
    state: present
    name: ansible_key
    public_key_file: /home/me/.ssh/id_rsa.pub

# Creates a new key pair; the generated private key is returned after the run.
- os_keypair:
    cloud: rax-dfw
    state: present
    name: ansible_key
'''

RETURN = '''
id:
    description: Unique UUID.
    returned: success
    type: string
name:
    description: Name given to the keypair.
    returned: success
    type: string
public_key:
    description: The public key value for the keypair.
    returned: success
    type: string
private_key:
    description: The private key value for the keypair.
    returned: Only when a keypair is generated for the user (e.g., when
              creating one and a public key is not specified).
    type: string
'''


def _system_state_change(module, keypair):
    state = module.params['state']
    if state == 'present' and not keypair:
        return True
    if state == 'absent' and keypair:
        return True
    return False


def main():
    argument_spec = openstack_full_argument_spec(
        name = dict(required=True),
        public_key = dict(default=None),
        public_key_file = dict(default=None),
        state = dict(default='present',
                     choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[['public_key', 'public_key_file']])

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    state = module.params['state']
    name = module.params['name']
    public_key = module.params['public_key']

    if module.params['public_key_file']:
        # Read and close the file; also strip the trailing newline.
        with open(module.params['public_key_file']) as f:
            public_key = f.read().rstrip()

    try:
        cloud = shade.openstack_cloud(**module.params)
        keypair = cloud.get_keypair(name)

        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, keypair))

        if state == 'present':
            if keypair and keypair['name'] == name:
                if public_key and (public_key != keypair['public_key']):
                    module.fail_json(
                        msg="Key name %s present but key hash not the same"
                            " as offered. Delete key first." % name
                    )
                else:
                    module.exit_json(changed=False, key=keypair)

            new_key = cloud.create_keypair(name, public_key)
            module.exit_json(changed=True, key=new_key)

        elif state == 'absent':
            if keypair:
                cloud.delete_keypair(name)
                module.exit_json(changed=True)
            module.exit_json(changed=False)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)

# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
    main()
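A minimal sketch (not part of this commit) of the check-mode pattern these modules share: _system_state_change() predicts whether a change would happen without touching the cloud. The 'resource' argument stands in for whatever get_keypair()/get_flavor() returned:

# Hedged sketch: present-but-missing or absent-but-existing means "would change".
def _system_state_change(state, resource):
    if state == 'present' and not resource:
        return True
    if state == 'absent' and resource:
        return True
    return False

print(_system_state_change('present', None))           # True: would create
print(_system_state_change('present', {'name': 'k'}))  # False: already there
print(_system_state_change('absent', {'name': 'k'}))   # True: would delete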
@@ -57,8 +57,13 @@ requirements: ["shade"]

 EXAMPLES = '''
 - os_network:
-    name=t1network
-    state=present
+    name: t1network
+    state: present
+    auth:
+      auth_url: https://your_api_url.com:9000/v2.0
+      username: user
+      password: password
+      project_name: someproject
 '''
237 cloud/openstack/os_nova_flavor.py Normal file
@@ -0,0 +1,237 @@
#!/usr/bin/python

# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.

try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False

DOCUMENTATION = '''
---
module: os_nova_flavor
short_description: Manage OpenStack compute flavors
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
   - Add or remove flavors from OpenStack.
options:
   state:
     description:
        - Indicate desired state of the resource. When I(state) is 'present',
          then I(ram), I(vcpus), and I(disk) are all required. There are no
          default values for those parameters.
     choices: ['present', 'absent']
     required: false
     default: present
   name:
     description:
        - Flavor name.
     required: true
   ram:
     description:
        - Amount of memory, in MB.
     required: false
     default: null
   vcpus:
     description:
        - Number of virtual CPUs.
     required: false
     default: null
   disk:
     description:
        - Size of local disk, in GB.
     required: false
     default: null
   ephemeral:
     description:
        - Ephemeral space size, in GB.
     required: false
     default: 0
   swap:
     description:
        - Swap space size, in MB.
     required: false
     default: 0
   rxtx_factor:
     description:
        - RX/TX factor.
     required: false
     default: 1.0
   is_public:
     description:
        - Make flavor accessible to the public.
     required: false
     default: true
   flavorid:
     description:
        - ID for the flavor. This is optional as a unique UUID will be
          assigned if a value is not specified.
     required: false
     default: "auto"
requirements: ["shade"]
'''

EXAMPLES = '''
# Create a 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, 10GB of
# local disk, and 10GB of ephemeral space.
- os_nova_flavor:
    cloud=mycloud
    state=present
    name=tiny
    ram=1024
    vcpus=1
    disk=10
    ephemeral=10

# Delete 'tiny' flavor
- os_nova_flavor:
    cloud=mycloud
    state=absent
    name=tiny
'''

RETURN = '''
flavor:
    description: Dictionary describing the flavor.
    returned: On success when I(state) is 'present'
    type: dictionary
    contains:
        id:
            description: Flavor ID.
            returned: success
            type: string
            sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
        name:
            description: Flavor name.
            returned: success
            type: string
            sample: "tiny"
        disk:
            description: Size of local disk, in GB.
            returned: success
            type: int
            sample: 10
        ephemeral:
            description: Ephemeral space size, in GB.
            returned: success
            type: int
            sample: 10
        ram:
            description: Amount of memory, in MB.
            returned: success
            type: int
            sample: 1024
        swap:
            description: Swap space size, in MB.
            returned: success
            type: int
            sample: 100
        vcpus:
            description: Number of virtual CPUs.
            returned: success
            type: int
            sample: 2
        is_public:
            description: Make flavor accessible to the public.
            returned: success
            type: bool
            sample: true
'''


def _system_state_change(module, flavor):
    state = module.params['state']
    if state == 'present' and not flavor:
        return True
    if state == 'absent' and flavor:
        return True
    return False


def main():
    argument_spec = openstack_full_argument_spec(
        state = dict(required=False, default='present',
                     choices=['absent', 'present']),
        name = dict(required=False),

        # required when state is 'present'
        ram = dict(required=False, type='int'),
        vcpus = dict(required=False, type='int'),
        disk = dict(required=False, type='int'),

        ephemeral = dict(required=False, default=0, type='int'),
        swap = dict(required=False, default=0, type='int'),
        rxtx_factor = dict(required=False, default=1.0, type='float'),
        is_public = dict(required=False, default=True, type='bool'),
        flavorid = dict(required=False, default="auto"),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
        required_if=[
            ('state', 'present', ['ram', 'vcpus', 'disk'])
        ],
        **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    state = module.params['state']
    name = module.params['name']

    try:
        cloud = shade.operator_cloud(**module.params)
        flavor = cloud.get_flavor(name)

        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, flavor))

        if state == 'present':
            if not flavor:
                flavor = cloud.create_flavor(
                    name=name,
                    ram=module.params['ram'],
                    vcpus=module.params['vcpus'],
                    disk=module.params['disk'],
                    flavorid=module.params['flavorid'],
                    ephemeral=module.params['ephemeral'],
                    swap=module.params['swap'],
                    rxtx_factor=module.params['rxtx_factor'],
                    is_public=module.params['is_public']
                )
                module.exit_json(changed=True, flavor=flavor)
            module.exit_json(changed=False, flavor=flavor)

        elif state == 'absent':
            if flavor:
                cloud.delete_flavor(name)
                module.exit_json(changed=True)
            module.exit_json(changed=False)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)


# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
    main()
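A minimal sketch (not part of this commit) of what the required_if rule above enforces: when 'state' equals 'present', the listed parameters must be supplied. The real check lives inside AnsibleModule; this is a plain-Python emulation for illustration:

# Hedged sketch: check_required_if is a hypothetical helper, not Ansible API.
def check_required_if(rules, params):
    missing = []
    for key, value, requirements in rules:
        if params.get(key) == value:
            missing = [r for r in requirements if params.get(r) is None]
    return missing

rules = [('state', 'present', ['ram', 'vcpus', 'disk'])]
print(check_required_if(rules, {'state': 'present', 'ram': 1024}))
# ['vcpus', 'disk'] -- these would trigger a fail_json in the real module
print(check_required_if(rules, {'state': 'absent'}))  # []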
327 cloud/openstack/os_security_group_rule.py Normal file
@@ -0,0 +1,327 @@
#!/usr/bin/python

# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.

try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False


DOCUMENTATION = '''
---
module: os_security_group_rule
short_description: Add/Delete rule from an existing security group
extends_documentation_fragment: openstack
version_added: "2.0"
description:
   - Add or Remove rule from an existing security group
options:
   security_group:
     description:
        - Name of the security group
     required: true
   protocol:
     description:
        - IP protocol
     choices: ['tcp', 'udp', 'icmp', None]
     default: None
   port_range_min:
     description:
        - Starting port
     required: false
     default: None
   port_range_max:
     description:
        - Ending port
     required: false
     default: None
   remote_ip_prefix:
     description:
        - Source IP address(es) in CIDR notation (exclusive with remote_group)
     required: false
   remote_group:
     description:
        - ID of Security group to link (exclusive with remote_ip_prefix)
     required: false
   ethertype:
     description:
        - Must be IPv4 or IPv6, and addresses represented in CIDR must
          match the ingress or egress rules. Not all providers support IPv6.
     choices: ['IPv4', 'IPv6']
     default: IPv4
   direction:
     description:
        - The direction in which the security group rule is applied. Not
          all providers support egress.
     choices: ['egress', 'ingress']
     default: ingress
   state:
     description:
       - Should the resource be present or absent.
     choices: [present, absent]
     default: present
requirements: ["shade"]
'''

EXAMPLES = '''
# Create a security group rule
- os_security_group_rule:
    cloud: mordred
    security_group: foo
    protocol: tcp
    port_range_min: 80
    port_range_max: 80
    remote_ip_prefix: 0.0.0.0/0

# Create a security group rule for ping
- os_security_group_rule:
    cloud: mordred
    security_group: foo
    protocol: icmp
    remote_ip_prefix: 0.0.0.0/0

# Another way to create the ping rule
- os_security_group_rule:
    cloud: mordred
    security_group: foo
    protocol: icmp
    port_range_min: -1
    port_range_max: -1
    remote_ip_prefix: 0.0.0.0/0

# Create a TCP rule covering all ports
- os_security_group_rule:
    cloud: mordred
    security_group: foo
    protocol: tcp
    port_range_min: 1
    port_range_max: 65535
    remote_ip_prefix: 0.0.0.0/0

# Another way to create the TCP rule above (defaults to all ports)
- os_security_group_rule:
    cloud: mordred
    security_group: foo
    protocol: tcp
    remote_ip_prefix: 0.0.0.0/0
'''

RETURN = '''
id:
    description: Unique rule UUID.
    type: string
direction:
    description: The direction in which the security group rule is applied.
    type: string
    sample: 'egress'
ethertype:
    description: One of IPv4 or IPv6.
    type: string
    sample: 'IPv4'
port_range_min:
    description: The minimum port number in the range that is matched by
                 the security group rule.
    type: int
    sample: 8000
port_range_max:
    description: The maximum port number in the range that is matched by
                 the security group rule.
    type: int
    sample: 8000
protocol:
    description: The protocol that is matched by the security group rule.
    type: string
    sample: 'tcp'
remote_ip_prefix:
    description: The remote IP prefix to be associated with this security group rule.
    type: string
    sample: '0.0.0.0/0'
security_group_id:
    description: The security group ID to associate with this security group rule.
    type: string
'''


def _ports_match(protocol, module_min, module_max, rule_min, rule_max):
    """
    Capture the complex port matching logic.

    The port values coming in for the module might be -1 (for ICMP),
    which will work only for Nova, but this is handled by shade. Likewise,
    they might be None, which works for Neutron, but not Nova. This too is
    handled by shade. Since shade will consistently return these port
    values as None, we need to convert any -1 values input to the module
    to None here for comparison.

    For TCP and UDP protocols, None values for both min and max are
    represented as the range 1-65535 for Nova, but remain None for
    Neutron. Shade returns the full range when Nova is the backend (since
    that is how Nova stores them), and None values for Neutron. If None
    values are input to the module for both values, then we need to adjust
    for comparison.
    """

    # Check if the user is supplying -1 for ICMP.
    if protocol == 'icmp':
        if module_min and int(module_min) == -1:
            module_min = None
        if module_max and int(module_max) == -1:
            module_max = None

    # Check if user is supplying None values for full TCP/UDP port range.
    if protocol in ['tcp', 'udp'] and module_min is None and module_max is None:
        if (rule_min and int(rule_min) == 1
                and rule_max and int(rule_max) == 65535):
            # (None, None) == (1, 65535)
            return True

    # Sanity check to make sure we don't have type comparison issues.
    if module_min:
        module_min = int(module_min)
    if module_max:
        module_max = int(module_max)
    if rule_min:
        rule_min = int(rule_min)
    if rule_max:
        rule_max = int(rule_max)

    return module_min == rule_min and module_max == rule_max


def _find_matching_rule(module, secgroup):
    """
    Find a rule in the group that matches the module parameters.
    :returns: The matching rule dict, or None if no matches.
    """
    protocol = module.params['protocol']
    remote_ip_prefix = module.params['remote_ip_prefix']
    ethertype = module.params['ethertype']
    direction = module.params['direction']
    remote_group_id = module.params['remote_group']

    for rule in secgroup['security_group_rules']:
        if (protocol == rule['protocol']
                and remote_ip_prefix == rule['remote_ip_prefix']
                and ethertype == rule['ethertype']
                and direction == rule['direction']
                and remote_group_id == rule['remote_group_id']
                and _ports_match(protocol,
                                 module.params['port_range_min'],
                                 module.params['port_range_max'],
                                 rule['port_range_min'],
                                 rule['port_range_max'])):
            return rule
    return None


def _system_state_change(module, secgroup):
    state = module.params['state']
    if secgroup:
        rule_exists = _find_matching_rule(module, secgroup)
    else:
        return False

    if state == 'present' and not rule_exists:
        return True
    if state == 'absent' and rule_exists:
        return True
    return False


def main():
    argument_spec = openstack_full_argument_spec(
        security_group = dict(required=True),
        # NOTE(Shrews): None is an acceptable protocol value for
        # Neutron, but Nova will balk at this.
        protocol = dict(default=None,
                        choices=[None, 'tcp', 'udp', 'icmp']),
        port_range_min = dict(required=False, type='int'),
        port_range_max = dict(required=False, type='int'),
        remote_ip_prefix = dict(required=False, default=None),
        # TODO(mordred): Make remote_group handle name and id
        remote_group = dict(required=False, default=None),
        ethertype = dict(default='IPv4',
                         choices=['IPv4', 'IPv6']),
        direction = dict(default='ingress',
                         choices=['egress', 'ingress']),
        state = dict(default='present',
                     choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[
            ['remote_ip_prefix', 'remote_group'],
        ]
    )

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    state = module.params['state']
    security_group = module.params['security_group']
    changed = False

    try:
        cloud = shade.openstack_cloud(**module.params)
        secgroup = cloud.get_security_group(security_group)

        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, secgroup))

        if state == 'present':
            if not secgroup:
                module.fail_json(msg='Could not find security group %s' %
                                 security_group)

            rule = _find_matching_rule(module, secgroup)
            if not rule:
                rule = cloud.create_security_group_rule(
                    secgroup['id'],
                    port_range_min=module.params['port_range_min'],
                    port_range_max=module.params['port_range_max'],
                    protocol=module.params['protocol'],
                    remote_ip_prefix=module.params['remote_ip_prefix'],
                    remote_group_id=module.params['remote_group'],
                    direction=module.params['direction'],
                    ethertype=module.params['ethertype']
                )
                changed = True
            module.exit_json(changed=changed, rule=rule, id=rule['id'])

        if state == 'absent' and secgroup:
            rule = _find_matching_rule(module, secgroup)
            if rule:
                cloud.delete_security_group_rule(rule['id'])
                changed = True

            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)

# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *

if __name__ == '__main__':
    main()
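A minimal sketch (not part of this commit) of the port normalization the _ports_match docstring describes, exercised on its two tricky cases (ICMP -1 and the full Nova-style TCP range). The function body is a condensed copy of the logic above:

# Hedged sketch: -1 (Nova ICMP) normalizes to None; (None, None) for TCP/UDP
# matches a Nova rule stored as (1, 65535).
def ports_match(protocol, module_min, module_max, rule_min, rule_max):
    if protocol == 'icmp':
        if module_min and int(module_min) == -1:
            module_min = None
        if module_max and int(module_max) == -1:
            module_max = None
    if protocol in ['tcp', 'udp'] and module_min is None and module_max is None:
        if (rule_min and int(rule_min) == 1
                and rule_max and int(rule_max) == 65535):
            return True  # (None, None) == (1, 65535)
    to_int = lambda v: int(v) if v else v
    return (to_int(module_min) == to_int(rule_min)
            and to_int(module_max) == to_int(rule_max))

print(ports_match('icmp', -1, -1, None, None))   # True
print(ports_match('tcp', None, None, 1, 65535))  # True (Nova backend)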
@@ -90,6 +90,11 @@ options:
        - Ensure instance has public ip however the cloud wants to do that
      required: false
      default: 'yes'
+   auto_floating_ip:
+     description:
+        - If the module should automatically assign a floating IP
+     required: false
+     default: 'yes'
    floating_ips:
      description:
        - list of valid floating IPs that pre-exist to assign to this node
@@ -132,7 +137,7 @@ options:
        - Boot instance from a volume
      required: false
      default: None
-   terminate_volume:
+   terminate_volume:
      description:
        - If true, delete volume when deleting instance (if booted from volume)
      default: false
@@ -257,6 +262,15 @@ def _network_args(module, cloud):
                 msg='Could not find network by net-name: %s' %
                 net['net-name'])
             args.append({'net-id': by_name['id']})
         elif net.get('port-id'):
             args.append(net)
+        elif net.get('port-name'):
+            by_name = cloud.get_port(net['port-name'])
+            if not by_name:
+                module.fail_json(
+                    msg='Could not find port by port-name: %s' %
+                    net['port-name'])
+            args.append({'port-id': by_name['id']})
     return args
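For illustration only, the three entry shapes _network_args() now accepts (the names 'private' and 'trunk0' are made-up examples, not from this commit):

# Hedged sketch of a nics parameter value:
nics = [
    {'net-name': 'private'},  # resolved to {'net-id': ...} via get_network()
    {'port-id': 'a1b2c3'},    # passed through unchanged
    {'port-name': 'trunk0'},  # resolved to {'port-id': ...} via get_port()
]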
@@ -282,8 +296,12 @@ def _create_server(module, cloud):

     if flavor:
         flavor_dict = cloud.get_flavor(flavor)
+        if not flavor_dict:
+            module.fail_json(msg="Could not find flavor %s" % flavor)
     else:
         flavor_dict = cloud.get_flavor_by_ram(flavor_ram, flavor_include)
+        if not flavor_dict:
+            module.fail_json(msg="Could not find any matching flavor")

     nics = _network_args(module, cloud)
@@ -387,7 +405,7 @@ def main():
         flavor_include = dict(default=None),
         key_name = dict(default=None),
         security_groups = dict(default='default'),
-        nics = dict(default=[]),
+        nics = dict(default=[], type='list'),
         meta = dict(default=None),
         userdata = dict(default=None),
         config_drive = dict(default=False, type='bool'),
@@ -92,6 +92,18 @@ options:
        - A list of host route dictionaries for the subnet.
      required: false
      default: None
+   ipv6_ra_mode:
+     description:
+        - IPv6 router advertisement mode
+     choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
+     required: false
+     default: None
+   ipv6_address_mode:
+     description:
+        - IPv6 address mode
+     choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
+     required: false
+     default: None
requirements:
    - "python >= 2.6"
    - "shade"
@@ -117,11 +129,53 @@ EXAMPLES = '''
 - os_subnet:
     state=absent
     name=net1subnet
+
+# Create an ipv6 stateless subnet
+- os_subnet:
+    state: present
+    name: intv6
+    network_name: internal
+    ip_version: 6
+    cidr: 2db8:1::/64
+    dns_nameservers:
+      - 2001:4860:4860::8888
+      - 2001:4860:4860::8844
+    ipv6_ra_mode: dhcpv6-stateless
+    ipv6_address_mode: dhcpv6-stateless
 '''

+def _can_update(subnet, module, cloud):
+    """Check for differences in non-updatable values"""
+    network_name = module.params['network_name']
+    cidr = module.params['cidr']
+    ip_version = int(module.params['ip_version'])
+    ipv6_ra_mode = module.params['ipv6_ra_mode']
+    ipv6_a_mode = module.params['ipv6_address_mode']
+
+    if network_name:
+        network = cloud.get_network(network_name)
+        if network:
+            netid = network['id']
+        else:
+            module.fail_json(msg='No network found for %s' % network_name)
+        if netid != subnet['network_id']:
+            module.fail_json(msg='Cannot update network_name in existing \
+subnet')
+    if ip_version and subnet['ip_version'] != ip_version:
+        module.fail_json(msg='Cannot update ip_version in existing subnet')
+    if ipv6_ra_mode and subnet.get('ipv6_ra_mode', None) != ipv6_ra_mode:
+        module.fail_json(msg='Cannot update ipv6_ra_mode in existing subnet')
+    if ipv6_a_mode and subnet.get('ipv6_address_mode', None) != ipv6_a_mode:
+        module.fail_json(msg='Cannot update ipv6_address_mode in existing \
+subnet')
+
+
-def _needs_update(subnet, module):
+def _needs_update(subnet, module, cloud):
     """Check for differences in the updatable values."""
+
+    # First check if we are trying to update something we're not allowed to
+    _can_update(subnet, module, cloud)
+
     # now check for the things we are allowed to update
     enable_dhcp = module.params['enable_dhcp']
     subnet_name = module.params['name']
     pool_start = module.params['allocation_pool_start']
@@ -151,18 +205,19 @@ def _needs_update(subnet, module):
     return False


-def _system_state_change(module, subnet):
+def _system_state_change(module, subnet, cloud):
     state = module.params['state']
     if state == 'present':
         if not subnet:
             return True
-        return _needs_update(subnet, module)
+        return _needs_update(subnet, module, cloud)
     if state == 'absent' and subnet:
         return True
     return False


 def main():
+    ipv6_mode_choices = ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
     argument_spec = openstack_full_argument_spec(
         name=dict(required=True),
         network_name=dict(default=None),
@@ -174,6 +229,8 @@ def main():
         allocation_pool_start=dict(default=None),
         allocation_pool_end=dict(default=None),
         host_routes=dict(default=None, type='list'),
+        ipv6_ra_mode=dict(default=None, choices=ipv6_mode_choices),
+        ipv6_address_mode=dict(default=None, choices=ipv6_mode_choices),
         state=dict(default='present', choices=['absent', 'present']),
     )
@@ -196,6 +253,8 @@ def main():
     pool_start = module.params['allocation_pool_start']
     pool_end = module.params['allocation_pool_end']
     host_routes = module.params['host_routes']
+    ipv6_ra_mode = module.params['ipv6_ra_mode']
+    ipv6_a_mode = module.params['ipv6_address_mode']

     # Check for required parameters when state == 'present'
     if state == 'present':
@@ -215,7 +274,8 @@ def main():
         subnet = cloud.get_subnet(subnet_name)

         if module.check_mode:
-            module.exit_json(changed=_system_state_change(module, subnet))
+            module.exit_json(changed=_system_state_change(module, subnet,
+                                                          cloud))

         if state == 'present':
             if not subnet:
@@ -226,10 +286,12 @@ def main():
                 gateway_ip=gateway_ip,
                 dns_nameservers=dns,
                 allocation_pools=pool,
-                host_routes=host_routes)
+                host_routes=host_routes,
+                ipv6_ra_mode=ipv6_ra_mode,
+                ipv6_address_mode=ipv6_a_mode)
             changed = True
         else:
-            if _needs_update(subnet, module):
+            if _needs_update(subnet, module, cloud):
                 cloud.update_subnet(subnet['id'],
                                     subnet_name=subnet_name,
                                     enable_dhcp=enable_dhcp,
@@ -97,7 +97,9 @@ def rax_facts(module, address, name, server_id):
             servers.append(cs.servers.get(server_id))
         except Exception, e:
             pass

+    servers[:] = [server for server in servers if server.status != "DELETED"]
+
     if len(servers) > 1:
         module.fail_json(msg='Multiple servers found matching provided '
                              'search parameters')
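A minimal sketch (not part of this commit) of why the patch uses servers[:] rather than rebinding the name: slice assignment filters the existing list object in place, so any other reference to the same list also sees the DELETED entries removed. Server is a made-up stand-in for a pyrax server object:

# Hedged sketch of in-place filtering via slice assignment.
class Server(object):
    def __init__(self, status):
        self.status = status

servers = [Server('ACTIVE'), Server('DELETED')]
alias = servers
servers[:] = [s for s in servers if s.status != 'DELETED']
print(len(alias))  # 1 -- the alias observes the in-place filtering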
@@ -1,6 +1,20 @@
 #!/usr/bin/python

+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

 # TODO:
 # Ability to set CPU/Memory reservations
@@ -65,13 +79,13 @@ options:
     default: null
   state:
     description:
-      - Indicate desired state of the vm.
+      - Indicate desired state of the vm. 'reconfigured' only applies changes to 'memory_mb' and 'num_cpus' in vm_hardware parameter, and only when hot-plugging is enabled for the guest.
     default: present
     choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured']
   from_template:
     version_added: "1.9"
     description:
-      - Specifies if the VM should be deployed from a template (cannot be ran with state)
+      - Specifies if the VM should be deployed from a template (mutually exclusive with 'state' parameter). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template.
     default: no
     choices: ['yes', 'no']
   template_src:
@@ -79,6 +93,12 @@ options:
     description:
       - Name of the source template to deploy from
     default: None
+  snapshot_to_clone:
+    description:
+      - A string that when specified, will create a linked clone copy of the VM. Snapshot must already be taken in vCenter.
+    version_added: "2.0"
+    required: false
+    default: none
   vm_disk:
     description:
      - A key, value list of disks and their sizes and which datastore to keep it in.
@@ -132,6 +152,7 @@ EXAMPLES = '''
 # Returns changed = True and adds ansible_facts from the new VM
 # State will set the power status of a guest upon creation. Use powered_on to create and boot.
 # Options ['state', 'vm_extra_config', 'vm_disk', 'vm_nic', 'vm_hardware', 'esxi'] are required together
+# Note: vm_floppy support added in 2.0

 - vsphere_guest:
     vcenter_hostname: vcenter.mydomain.local
@@ -165,6 +186,9 @@ EXAMPLES = '''
     vm_cdrom:
       type: "iso"
       iso_path: "DatastoreName/cd-image.iso"
+    vm_floppy:
+      type: "image"
+      image_path: "DatastoreName/floppy-image.flp"
     esxi:
       datacenter: MyDatacenter
       hostname: esx001.mydomain.local
@@ -202,7 +226,6 @@ EXAMPLES = '''
     hostname: esx001.mydomain.local

 # Deploy a guest from a template
-# No reconfiguration of the destination guest is done at this stage, a reconfigure would be needed to adjust memory/cpu etc..
 - vsphere_guest:
     vcenter_hostname: vcenter.mydomain.local
     username: myuser
@@ -357,6 +380,44 @@ def add_cdrom(module, s, config_target, config, devices, default_devs, type="cli
     devices.append(cd_spec)


+def add_floppy(module, s, config_target, config, devices, default_devs, type="image", vm_floppy_image_path=None):
+    # Add a floppy
+    # Make sure the datastore exists.
+    if vm_floppy_image_path:
+        image_location = vm_floppy_image_path.split('/', 1)
+        datastore, ds = find_datastore(
+            module, s, image_location[0], config_target)
+        image_path = image_location[1]
+
+    floppy_spec = config.new_deviceChange()
+    floppy_spec.set_element_operation('add')
+    floppy_ctrl = VI.ns0.VirtualFloppy_Def("floppy_ctrl").pyclass()
+
+    if type == "image":
+        image = VI.ns0.VirtualFloppyImageBackingInfo_Def("image").pyclass()
+        ds_ref = image.new_datastore(ds)
+        ds_ref.set_attribute_type(ds.get_attribute_type())
+        image.set_element_datastore(ds_ref)
+        image.set_element_fileName("%s %s" % (datastore, image_path))
+        floppy_ctrl.set_element_backing(image)
+        floppy_ctrl.set_element_key(3)
+        floppy_spec.set_element_device(floppy_ctrl)
+    elif type == "client":
+        client = VI.ns0.VirtualFloppyRemoteDeviceBackingInfo_Def(
+            "client").pyclass()
+        client.set_element_deviceName("/dev/fd0")
+        floppy_ctrl.set_element_backing(client)
+        floppy_ctrl.set_element_key(3)
+        floppy_spec.set_element_device(floppy_ctrl)
+    else:
+        s.disconnect()
+        module.fail_json(
+            msg="Error adding floppy of type %s to vm spec. "
+                " floppy type can either be image or client" % (type))
+
+    devices.append(floppy_spec)
+
+
 def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name="VM Network", network_type="standard"):
     # add a NIC
     # Different network card types are: "VirtualE1000",
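For illustration only, the vm_hardware shape that create_vm() expects for the new floppy support; the datastore and file names are made-up examples, not from this commit:

# Hedged sketch of a vm_hardware parameter with a floppy device:
vm_hardware = {
    'memory_mb': 2048,
    'num_cpus': 2,
    'vm_floppy': {
        'type': 'image',                                 # or 'client'
        'image_path': 'DatastoreName/floppy-image.flp',  # required for 'image'
    },
}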
@@ -530,7 +591,7 @@ def vmdisk_id(vm, current_datastore_name):
     return id_list


-def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name):
+def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, snapshot_to_clone):
     vmTemplate = vsphere_client.get_vm_by_name(template_src)
     vmTarget = None
@@ -614,9 +675,14 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
     try:
         if vmTarget:
             changed = False
+        elif snapshot_to_clone is not None:
+            # If snapshot_to_clone is specified, create a linked clone instead of a full clone.
+            vmTemplate.clone(guest, resourcepool=rpmor, linked=True, snapshot=snapshot_to_clone)
+            changed = True
         else:
             vmTemplate.clone(guest, resourcepool=rpmor)
             changed = True

         vsphere_client.disconnect()
         module.exit_json(changed=changed)
     except Exception as e:
@@ -922,6 +988,27 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
             # Add a CD-ROM device to the VM.
             add_cdrom(module, vsphere_client, config_target, config, devices,
                       default_devs, cdrom_type, cdrom_iso_path)
+        if 'vm_floppy' in vm_hardware:
+            floppy_image_path = None
+            floppy_type = None
+            try:
+                floppy_type = vm_hardware['vm_floppy']['type']
+            except KeyError:
+                vsphere_client.disconnect()
+                module.fail_json(
+                    msg="Error on %s definition. floppy type needs to be"
+                        " specified." % vm_hardware['vm_floppy'])
+            if floppy_type == 'image':
+                try:
+                    floppy_image_path = vm_hardware['vm_floppy']['image_path']
+                except KeyError:
+                    vsphere_client.disconnect()
+                    module.fail_json(
+                        msg="Error on %s definition. floppy image_path needs"
+                            " to be specified." % vm_hardware['vm_floppy'])
+            # Add a floppy to the VM.
+            add_floppy(module, vsphere_client, config_target, config, devices,
+                       default_devs, floppy_type, floppy_image_path)
         if vm_nic:
             for nic in sorted(vm_nic.iterkeys()):
                 try:
@@ -1218,9 +1305,10 @@ def main():
                 'reconfigured'
             ],
             default='present'),
-        vmware_guest_facts=dict(required=False, choices=BOOLEANS),
-        from_template=dict(required=False, choices=BOOLEANS),
+        vmware_guest_facts=dict(required=False, type='bool'),
+        from_template=dict(required=False, type='bool'),
         template_src=dict(required=False, type='str'),
+        snapshot_to_clone=dict(required=False, default=None, type='str'),
         guest=dict(required=True, type='str'),
         vm_disk=dict(required=False, type='dict', default={}),
         vm_nic=dict(required=False, type='dict', default={}),
@@ -1229,7 +1317,7 @@ def main():
         vm_hw_version=dict(required=False, default=None, type='str'),
         resource_pool=dict(required=False, default=None, type='str'),
         cluster=dict(required=False, default=None, type='str'),
-        force=dict(required=False, choices=BOOLEANS, default=False),
+        force=dict(required=False, type='bool', default=False),
         esxi=dict(required=False, type='dict', default={}),
@@ -1245,8 +1333,7 @@ def main():
             'vm_hardware',
             'esxi'
         ],
         ['resource_pool', 'cluster'],
-        ['from_template', 'resource_pool', 'template_src']
+        ['from_template', 'template_src'],
     ],
 )
@@ -1270,6 +1357,8 @@ def main():
     cluster = module.params['cluster']
     template_src = module.params['template_src']
     from_template = module.params['from_template']
+    snapshot_to_clone = module.params['snapshot_to_clone']
+

     # CONNECT TO THE SERVER
     viserver = VIServer()
@@ -1349,7 +1438,8 @@ def main():
                 guest=guest,
                 template_src=template_src,
                 module=module,
-                cluster_name=cluster
+                cluster_name=cluster,
+                snapshot_to_clone=snapshot_to_clone
             )
         if state in ['restarted', 'reconfigured']:
             module.fail_json(
@@ -21,6 +21,7 @@
 import copy
 import sys
 import datetime
+import glob
 import traceback
 import re
 import shlex
@@ -29,8 +30,8 @@ import os
 DOCUMENTATION = '''
 ---
 module: command
-version_added: historical
 short_description: Executes a command on a remote node
+version_added: historical
 description:
     - The M(command) module takes the command name followed by a list of space-delimited arguments.
     - The given command will be executed on all selected nodes. It will not be
@@ -44,15 +45,14 @@ options:
         See the examples!
     required: true
     default: null
-    aliases: []
   creates:
     description:
-      - a filename, when it already exists, this step will B(not) be run.
+      - a filename or glob pattern, when it already exists, this step will B(not) be run.
     required: no
     default: null
   removes:
     description:
-      - a filename, when it does not exist, this step will B(not) be run.
+      - a filename or glob pattern, when it does not exist, this step will B(not) be run.
     version_added: "0.8"
     required: no
     default: null
@@ -143,12 +143,15 @@ def check_command(commandline):
                  'mount': 'mount', 'rpm': 'yum', 'yum': 'yum', 'apt-get': 'apt-get',
                  'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'template or lineinfile',
                  'rsync': 'synchronize' }
+    become = [ 'sudo', 'su', 'pbrun', 'pfexec', 'runas' ]
     warnings = list()
     command = os.path.basename(commandline.split()[0])
     if command in arguments:
         warnings.append("Consider using file module with %s rather than running %s" % (arguments[command], command))
     if command in commands:
         warnings.append("Consider using %s module rather than running %s" % (commands[command], command))
+    if command in become:
+        warnings.append("Consider using 'become', 'become_method', and 'become_user' rather than running %s" % (command,))
     return warnings
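A minimal sketch (not part of this commit) of what the new 'become' check contributes to check_command()'s warnings, using a stripped-down copy of the function's logic:

# Hedged sketch: check_become is a hypothetical helper for illustration.
import os

def check_become(commandline):
    become = ['sudo', 'su', 'pbrun', 'pfexec', 'runas']
    command = os.path.basename(commandline.split()[0])
    if command in become:
        return ("Consider using 'become', 'become_method', and "
                "'become_user' rather than running %s" % command)
    return None

print(check_become('sudo yum update'))  # emits the become warning
print(check_become('ls -l'))            # None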
@@ -188,7 +191,7 @@ def main():
         # and the filename already exists. This allows idempotence
         # of command executions.
         v = os.path.expanduser(creates)
-        if os.path.exists(v):
+        if glob.glob(v):
             module.exit_json(
                 cmd=args,
                 stdout="skipped, since %s exists" % v,
@@ -202,7 +205,7 @@ def main():
         # and the filename does not exist. This allows idempotence
         # of command executions.
         v = os.path.expanduser(removes)
-        if not os.path.exists(v):
+        if not glob.glob(v):
             module.exit_json(
                 cmd=args,
                 stdout="skipped, since %s does not exist" % v,
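A minimal sketch (not part of this commit) of the behavioral difference behind the creates/removes change: os.path.exists() only matches a literal path, while glob.glob() also honors wildcards, so a pattern like 'creates=/tmp/build-*.lock' now works as documented:

# Hedged sketch; the path is a made-up example.
import glob
import os

path = '/tmp/build-*.lock'
print(os.path.exists(path))   # False unless a file is literally named that
print(bool(glob.glob(path)))  # True if any file matches the pattern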
@@ -1,10 +1,25 @@
 # this is a virtual module that is entirely implemented server side

+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
 DOCUMENTATION = '''
 ---
 module: raw
-version_added: historical
 short_description: Executes a low-down and dirty SSH command
+version_added: historical
 options:
   free_form:
     description:
@@ -15,7 +30,7 @@ options:
       - change the shell used to execute the command. Should be an absolute path to the executable.
     required: false
     version_added: "1.0"
-description:
+description:
   - Executes a low-down and dirty SSH command, not going through the module
     subsystem. This is useful and should only be done in two cases. The
     first case is installing C(python-simplejson) on older (Python 2.4 and

@@ -34,7 +49,7 @@ notes:
     playbooks will follow the trend of using M(command) unless M(shell) is
     explicitly required. When running ad-hoc commands, use your best
     judgement.
-author:
+author:
     - Ansible Core Team
     - Michael DeHaan
'''

@@ -1,3 +1,17 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
 DOCUMENTATION = """
 ---

@@ -2,6 +2,21 @@
 # it runs the 'command' module with special arguments and it behaves differently.
 # See the command source and the comment "#USE_SHELL".

+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
 DOCUMENTATION = '''
 ---
 module: shell

@@ -83,7 +83,8 @@ options:
     required: false
 notes:
    - Requires the MySQLdb Python package on the remote host. For Ubuntu, this
-     is as easy as apt-get install python-mysqldb. (See M(apt).)
+     is as easy as apt-get install python-mysqldb. (See M(apt).) For CentOS/Fedora, this
+     is as easy as yum install MySQL-python. (See M(yum).)
    - Both I(login_password) and I(login_user) are required when you are
      passing credentials. If none are present, the module will attempt to read
      the credentials from C(~/.my.cnf), and finally fall back to using the MySQL

@@ -326,7 +327,7 @@ def main():
     if state in ['dump','import']:
         if target is None:
             module.fail_json(msg="with state=%s target is required" % (state))
-        if db == 'all':
+    if db == 'all':
         connect_to_db = 'mysql'
         db = 'mysql'
         all_databases = True

@@ -109,7 +109,7 @@ options:
 notes:
    - Requires the MySQLdb Python package on the remote host. For Ubuntu, this
      is as easy as apt-get install python-mysqldb.
-   - Both C(login_password) and C(login_username) are required when you are
+   - Both C(login_password) and C(login_user) are required when you are
      passing credentials. If none are present, the module will attempt to read
      the credentials from C(~/.my.cnf), and finally fall back to using the MySQL
      default login of 'root' with no password.

@@ -157,6 +157,7 @@ password=n<_665{vS43y

 import getpass
 import tempfile
+import re
 try:
     import MySQLdb
 except ImportError:

@@ -291,7 +292,7 @@ def privileges_get(cursor, user,host):
         return x

     for grant in grants:
-        res = re.match("GRANT (.+) ON (.+) TO '.+'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0])
+        res = re.match("GRANT (.+) ON (.+) TO '.*'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0])
         if res is None:
             raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0])
         privileges = res.group(1).split(", ")

@@ -316,13 +317,22 @@ def privileges_unpack(priv):
     not specified in the string, as MySQL will always provide this by default.
     """
     output = {}
+    privs = []
     for item in priv.strip().split('/'):
         pieces = item.strip().split(':')
         dbpriv = pieces[0].rsplit(".", 1)
-        pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1])
+        # Do not escape if privilege is for database '*' (all databases)
+        if dbpriv[0].strip('`') != '*':
+            pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1])

-        output[pieces[0]] = [s.strip() for s in pieces[1].upper().split(',')]
-        new_privs = frozenset(output[pieces[0]])
+        if '(' in pieces[1]:
+            output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper())
+            for i in output[pieces[0]]:
+                privs.append(re.sub(r'\(.*\)','',i))
+        else:
+            output[pieces[0]] = pieces[1].upper().split(',')
+            privs = output[pieces[0]]
+        new_privs = frozenset(privs)
         if not new_privs.issubset(VALID_PRIVS):
             raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS))
|
|||
description:
|
||||
- mysql host to connect
|
||||
required: False
|
||||
login_port:
|
||||
version_added: "2.0"
|
||||
description:
|
||||
- mysql port to connect
|
||||
required: False
|
||||
login_unix_socket:
|
||||
description:
|
||||
- unix socket to connect mysql server
|
||||
|
@ -68,6 +73,7 @@ EXAMPLES = '''
|
|||
import ConfigParser
|
||||
import os
|
||||
import warnings
|
||||
from re import match
|
||||
|
||||
try:
|
||||
import MySQLdb
|
||||
|
@ -104,10 +110,12 @@ def typedvalue(value):
|
|||
|
||||
|
||||
def getvariable(cursor, mysqlvar):
|
||||
cursor.execute("SHOW VARIABLES LIKE %s", (mysqlvar,))
|
||||
cursor.execute("SHOW VARIABLES WHERE Variable_name = %s", (mysqlvar,))
|
||||
mysqlvar_val = cursor.fetchall()
|
||||
return mysqlvar_val
|
||||
|
||||
if len(mysqlvar_val) is 1:
|
||||
return mysqlvar_val[0][1]
|
||||
else:
|
||||
return None
|
||||
|
||||
def setvariable(cursor, mysqlvar, value):
|
||||
""" Set a global mysql variable to a given value
|
||||
|
@ -117,11 +125,9 @@ def setvariable(cursor, mysqlvar, value):
|
|||
should be passed as numeric literals.
|
||||
|
||||
"""
|
||||
query = ["SET GLOBAL %s" % mysql_quote_identifier(mysqlvar, 'vars') ]
|
||||
query.append(" = %s")
|
||||
query = ' '.join(query)
|
||||
query = "SET GLOBAL %s = " % mysql_quote_identifier(mysqlvar, 'vars')
|
||||
try:
|
||||
cursor.execute(query, (value,))
|
||||
cursor.execute(query + "%s", (value,))
|
||||
cursor.fetchall()
|
||||
result = True
|
||||
except Exception, e:
|
||||
|
@ -193,7 +199,8 @@ def main():
|
|||
argument_spec = dict(
|
||||
login_user=dict(default=None),
|
||||
login_password=dict(default=None),
|
||||
login_host=dict(default="localhost"),
|
||||
login_host=dict(default="127.0.0.1"),
|
||||
login_port=dict(default="3306", type='int'),
|
||||
login_unix_socket=dict(default=None),
|
||||
variable=dict(default=None),
|
||||
value=dict(default=None)
|
||||
|
@ -203,8 +210,13 @@ def main():
|
|||
user = module.params["login_user"]
|
||||
password = module.params["login_password"]
|
||||
host = module.params["login_host"]
|
||||
port = module.params["login_port"]
|
||||
mysqlvar = module.params["variable"]
|
||||
value = module.params["value"]
|
||||
if mysqlvar is None:
|
||||
module.fail_json(msg="Cannot run without variable to operate with")
|
||||
if match('^[0-9a-z_]+$', mysqlvar) is None:
|
||||
module.fail_json(msg="invalid variable name \"%s\"" % mysqlvar)
|
||||
if not mysqldb_found:
|
||||
module.fail_json(msg="the python mysqldb module is required")
|
||||
else:
|
||||
|
@ -227,23 +239,21 @@ def main():
|
|||
module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided")
|
||||
try:
|
||||
if module.params["login_unix_socket"]:
|
||||
db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql")
|
||||
db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql")
|
||||
else:
|
||||
db_connection = MySQLdb.connect(host=module.params["login_host"], user=login_user, passwd=login_password, db="mysql")
|
||||
db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql")
|
||||
cursor = db_connection.cursor()
|
||||
except Exception, e:
|
||||
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials")
|
||||
if mysqlvar is None:
|
||||
module.fail_json(msg="Cannot run without variable to operate with")
|
||||
mysqlvar_val = getvariable(cursor, mysqlvar)
|
||||
if mysqlvar_val is None:
|
||||
module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False)
|
||||
if value is None:
|
||||
module.exit_json(msg=mysqlvar_val)
|
||||
else:
|
||||
if len(mysqlvar_val) < 1:
|
||||
module.fail_json(msg="Variable not available", changed=False)
|
||||
# Type values before using them
|
||||
value_wanted = typedvalue(value)
|
||||
value_actual = typedvalue(mysqlvar_val[0][1])
|
||||
value_actual = typedvalue(mysqlvar_val)
|
||||
if value_wanted == value_actual:
|
||||
module.exit_json(msg="Variable already set to requested value", changed=False)
|
||||
try:
|
||||
|
|
|
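A minimal sketch (with hypothetical values) of the two guards the module gains here: variable names are whitelisted before being interpolated into "SET GLOBAL ...", and the lookup now requires an exact name match rather than a LIKE pattern, so wildcard characters cannot widen it:

from re import match

for name in ('max_connections', 'bad-name; DROP TABLE x', 'wait_%'):
    if match('^[0-9a-z_]+$', name) is None:
        print('rejected: %s' % name)
    else:
        print('accepted: %s' % name)
# accepted: max_connections
# rejected: bad-name; DROP TABLE x
# rejected: wait_%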
@ -315,7 +315,7 @@ class Connection(object):
        query = """SELECT relname
                   FROM pg_catalog.pg_class c
                   JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                   WHERE nspname = %s AND relkind = 'r'"""
                   WHERE nspname = %s AND relkind in ('r', 'v')"""
        self.cursor.execute(query, (schema,))
        return [t[0] for t in self.cursor.fetchall()]
@ -92,7 +92,7 @@ options:
    description:
      - "PostgreSQL role attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER"
    required: false
    default: null
    default: ""
    choices: [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB",
               "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ]
  state:

@ -233,7 +233,7 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir
        return False

    # Handle passwords.
    if not no_password_changes and (password is not None or role_attr_flags is not None):
    if not no_password_changes and (password is not None or role_attr_flags != ''):
        # Select password and all flag-like columns in order to verify changes.
        query_password_data = dict(password=password, expires=expires)
        select = "SELECT * FROM pg_authid where rolname=%(user)s"

@ -490,10 +490,10 @@ def parse_role_attrs(role_attr_flags):

def normalize_privileges(privs, type_):
    new_privs = set(privs)
    if 'ALL' in privs:
    if 'ALL' in new_privs:
        new_privs.update(VALID_PRIVS[type_])
        new_privs.remove('ALL')
    if 'TEMP' in privs:
    if 'TEMP' in new_privs:
        new_privs.add('TEMPORARY')
        new_privs.remove('TEMP')
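A minimal sketch of normalize_privileges as changed above, with a hypothetical VALID_PRIVS table (the real module keys it by object type). Membership is now tested against the working set, so the expansion of 'ALL' and 'TEMP' cannot be confused by the original input list:

VALID_PRIVS = {'table': set(('SELECT', 'INSERT', 'UPDATE', 'DELETE'))}

def normalize_privileges(privs, type_):
    new_privs = set(privs)
    if 'ALL' in new_privs:
        new_privs.update(VALID_PRIVS[type_])
        new_privs.remove('ALL')
    if 'TEMP' in new_privs:
        new_privs.add('TEMPORARY')
        new_privs.remove('TEMP')
    return new_privs

print(sorted(normalize_privileges(['ALL'], 'table')))
# ['DELETE', 'INSERT', 'SELECT', 'UPDATE']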
263 files/acl.py

@ -1,4 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify

@ -20,7 +21,9 @@ module: acl
version_added: "1.4"
short_description: Sets and retrieves file ACL information.
description:
  - Sets and retrieves file ACL information.
notes:
  - As of Ansible 2.0, this module only supports Linux distributions.
options:
  name:
    required: true

@ -79,7 +82,16 @@ options:
    description:
      - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '<etype>:<qualifier>:<perms>'. The qualifier may be empty for some types, but the type and perms are always required. '-' can be used as a placeholder when you do not care about permissions. This is now superseded by the entity, type and permissions fields.

author: "Brian Coca (@bcoca)"
  recursive:
    version_added: "2.0"
    required: false
    default: no
    choices: [ 'yes', 'no' ]
    description:
      - Recursively sets the specified ACL (added in Ansible 2.0). Incompatible with C(state=query).
author:
  - "Brian Coca (@bcoca)"
  - "Jérémie Astori (@astorije)"
notes:
  - The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed.
'''

@ -110,35 +122,15 @@ acl:
    sample: [ "user::rwx", "group::rwx", "other::rwx" ]
'''

def normalize_permissions(p):
    perms = ['-','-','-']
    for char in p:
        if char == 'r':
            perms[0] = 'r'
        if char == 'w':
            perms[1] = 'w'
        if char == 'x':
            perms[2] = 'x'
        if char == 'X':
            if perms[2] != 'x':  # 'x' is more permissive
                perms[2] = 'X'
    return ''.join(perms)

def split_entry(entry):
    ''' splits entry and ensures normalized return'''

    a = entry.split(':')
    a.reverse()
    if len(a) == 3:
        a.append(False)
    try:
        p,e,t,d = a
    except ValueError, e:
        print "wtf?? %s => %s" % (entry,a)
        raise e
    if len(a) == 2:
        a.append(None)

    if d:
        d = True
    t, e, p = a

    if t.startswith("u"):
        t = "user"

@ -151,69 +143,98 @@ def split_entry(entry):
    else:
        t = None

    p = normalize_permissions(p)
    return [t, e, p]

    return [d,t,e,p]

def get_acls(module,path,follow):
def build_entry(etype, entity, permissions=None):
    '''Builds and returns an entry string. Does not include the permissions bit if they are not provided.'''
    if permissions:
        return etype + ':' + entity + ':' + permissions
    else:
        return etype + ':' + entity


def build_command(module, mode, path, follow, default, recursive, entry=''):
    '''Builds and returns a getfacl/setfacl command.'''
    if mode == 'set':
        cmd = [module.get_bin_path('setfacl', True)]
        cmd.append('-m "%s"' % entry)
    elif mode == 'rm':
        cmd = [module.get_bin_path('setfacl', True)]
        cmd.append('-x "%s"' % entry)
    else:  # mode == 'get'
        cmd = [module.get_bin_path('getfacl', True)]
        # prevents absolute path warnings and removes headers
        cmd.append('--omit-header')
        cmd.append('--absolute-names')

    if recursive:
        cmd.append('--recursive')

    cmd = [ module.get_bin_path('getfacl', True) ]
    if not follow:
        cmd.append('-h')
    # prevents absolute path warnings and removes headers
    cmd.append('--omit-header')
    cmd.append('--absolute-names')
    cmd.append(path)
        cmd.append('--physical')

    return _run_acl(module,cmd)

def set_acl(module,path,entry,follow,default):

    cmd = [ module.get_bin_path('setfacl', True) ]
    if not follow:
        cmd.append('-h')
    if default:
        cmd.append('-d')
    cmd.append('-m "%s"' % entry)
        if(mode == 'rm'):
            cmd.append('-k')
        else:  # mode == 'set' or mode == 'get'
            cmd.append('-d')

    cmd.append(path)
    return cmd

    return _run_acl(module,cmd)

def rm_acl(module,path,entry,follow,default):
def acl_changed(module, cmd):
    '''Returns true if the provided command affects the existing ACLs, false otherwise.'''
    cmd = cmd[:]  # lists are mutable so cmd would be overridden without this
    cmd.insert(1, '--test')
    lines = run_acl(module, cmd)

    cmd = [ module.get_bin_path('setfacl', True) ]
    if not follow:
        cmd.append('-h')
    if default:
        cmd.append('-k')
    entry = entry[0:entry.rfind(':')]
    cmd.append('-x "%s"' % entry)
    cmd.append(path)
    for line in lines:
        if not line.endswith('*,*'):
            return True
    return False

    return _run_acl(module,cmd,False)

def _run_acl(module,cmd,check_rc=True):
def run_acl(module, cmd, check_rc=True):

    try:
        (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
    except Exception, e:
        module.fail_json(msg=e.strerror)

    # trim last line as it is always empty
    ret = out.splitlines()
    return ret[0:len(ret)-1]
    lines = out.splitlines()
    if lines and not lines[-1].split():
        # trim last line only when it is empty
        return lines[:-1]
    else:
        return lines


def main():
    if get_platform().lower() != 'linux':
        module.fail_json(msg="The acl module is only available for Linux distributions.")

    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True,aliases=['path'], type='str'),
            entry = dict(required=False, etype='str'),
            entity = dict(required=False, type='str', default=''),
            etype = dict(required=False, choices=['other', 'user', 'group', 'mask'], type='str'),
            permissions = dict(required=False, type='str'),
            state = dict(required=False, default='query', choices=[ 'query', 'present', 'absent' ], type='str'),
            follow = dict(required=False, type='bool', default=True),
            default= dict(required=False, type='bool', default=False),
        argument_spec=dict(
            name=dict(required=True, aliases=['path'], type='str'),
            entry=dict(required=False, type='str'),
            entity=dict(required=False, type='str', default=''),
            etype=dict(
                required=False,
                choices=['other', 'user', 'group', 'mask'],
                type='str'
            ),
            permissions=dict(required=False, type='str'),
            state=dict(
                required=False,
                default='query',
                choices=['query', 'present', 'absent'],
                type='str'
            ),
            follow=dict(required=False, type='bool', default=True),
            default=dict(required=False, type='bool', default=False),
            recursive=dict(required=False, type='bool', default=False),
        ),
        supports_check_mode=True,
    )

@ -226,79 +247,75 @@ def main():
    state = module.params.get('state')
    follow = module.params.get('follow')
    default = module.params.get('default')

    if permissions:
        permissions = normalize_permissions(permissions)
    recursive = module.params.get('recursive')

    if not os.path.exists(path):
        module.fail_json(msg="path not found or not accessible!")
        module.fail_json(msg="Path not found or not accessible.")

    if state in ['present','absent']:
        if not entry and not etype:
            module.fail_json(msg="%s requires either etype and permissions or just entry be set" % state)
    if state == 'query' and recursive:
        module.fail_json(msg="'recursive' MUST NOT be set when 'state=query'.")

    if not entry:
    if state == 'absent' and permissions:
        module.fail_json(msg="'permissions' MUST NOT be set when 'state=absent'.")

    if state == 'absent' and not entity:
        module.fail_json(msg="'entity' MUST be set when 'state=absent'.")

    if state in ['present', 'absent'] and not etype:
        module.fail_json(msg="'etype' MUST be set when 'state=%s'." % state)

    if entry:
        if etype or entity or permissions:
            module.fail_json(msg="entry and another incompatible field (entity, etype or permissions) are also set")
        if entry.count(":") not in [2,3]:
            module.fail_json(msg="Invalid entry: '%s', it requires 3 or 4 sections divided by ':'" % entry)
            module.fail_json(msg="'entry' MUST NOT be set when 'entity', 'etype' or 'permissions' are set.")

        default, etype, entity, permissions = split_entry(entry)
        if state == 'present' and entry.count(":") != 2:
            module.fail_json(msg="'entry' MUST have 3 sections divided by ':' when 'state=present'.")

    changed=False
        if state == 'absent' and entry.count(":") != 1:
            module.fail_json(msg="'entry' MUST have 2 sections divided by ':' when 'state=absent'.")

        if state == 'query':
            module.fail_json(msg="'entry' MUST NOT be set when 'state=query'.")

        etype, entity, permissions = split_entry(entry)

    changed = False
    msg = ""
    currentacls = get_acls(module,path,follow)

    if (state == 'present'):
        matched = False
        for oldentry in currentacls:
            if oldentry.count(":") == 0:
                continue
            old_default, old_type, old_entity, old_permissions = split_entry(oldentry)
            if old_default == default:
                if old_type == etype:
                    if etype in ['user', 'group']:
                        if old_entity == entity:
                            matched = True
                            if not old_permissions == permissions:
                                changed = True
                            break
                    else:
                        matched = True
                        if not old_permissions == permissions:
                            changed = True
                        break
        if not matched:
            changed=True
    if state == 'present':
        entry = build_entry(etype, entity, permissions)
        command = build_command(
            module, 'set', path, follow,
            default, recursive, entry
        )
        changed = acl_changed(module, command)

        if changed and not module.check_mode:
            set_acl(module,path,':'.join([etype, str(entity), permissions]),follow,default)
        msg="%s is present" % ':'.join([etype, str(entity), permissions])
            run_acl(module, command)
        msg = "%s is present" % entry

    elif state == 'absent':
        for oldentry in currentacls:
            if oldentry.count(":") == 0:
                continue
            old_default, old_type, old_entity, old_permissions = split_entry(oldentry)
            if old_default == default:
                if old_type == etype:
                    if etype in ['user', 'group']:
                        if old_entity == entity:
                            changed=True
                            break
                    else:
                        changed=True
                        break
        entry = build_entry(etype, entity)
        command = build_command(
            module, 'rm', path, follow,
            default, recursive, entry
        )
        changed = acl_changed(module, command)

        if changed and not module.check_mode:
            rm_acl(module,path,':'.join([etype, entity, '---']),follow,default)
        msg="%s is absent" % ':'.join([etype, entity, '---'])
    else:
        msg="current acl"
            run_acl(module, command, False)
        msg = "%s is absent" % entry

    if changed:
        currentacls = get_acls(module,path,follow)
    elif state == 'query':
        msg = "current acl"

    module.exit_json(changed=changed, msg=msg, acl=currentacls)
    acl = run_acl(
        module,
        build_command(module, 'get', path, follow, default, recursive)
    )

    module.exit_json(changed=changed, msg=msg, acl=acl)

# import module snippets
from ansible.module_utils.basic import *
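A minimal, standalone sketch of the idempotence check introduced above (no AnsibleModule needed; the sample setfacl --test output lines are hypothetical):

def acl_changed(lines):
    # setfacl --test prints one line per file; a line ending in '*,*'
    # means neither the access nor the default ACL would change.
    for line in lines:
        if not line.endswith('*,*'):
            return True
    return False

unchanged = ['somefile: u::rwx,u:bob:rwx,g::r-x,o::r--,*,*']
changed = ['somefile: u::rwx,u:bob:rw-,g::r-x,o::r--,*']
print(acl_changed(unchanged))  # False
print(acl_changed(changed))    # True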
@ -79,8 +79,23 @@ options:
      U(http://docs.python.org/2/library/re.html).
    required: false
    default: null
  ignore_hidden:
    description:
      - A boolean that controls if files that start with a '.' will be included or not.
    required: false
    default: false
    version_added: "2.0"
  validate:
    description:
      - The validation command to run before copying into place. The path to the file to
        validate is passed in via '%s' which must be present as in the sshd example below.
        The command is passed securely so shell features like expansion and pipes won't work.
    required: false
    default: null
    version_added: "2.0"
author: "Stephen Fromm (@sfromm)"
extends_documentation_fragment: files
extends_documentation_fragment:
  - files
'''

EXAMPLES = '''

@ -89,12 +104,15 @@ EXAMPLES = '''

# When a delimiter is specified, it will be inserted in between each fragment
- assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf delimiter='### START FRAGMENT ###'

# Copy a new "sshd_config" file into place, after passing validation with sshd
- assemble: src=/etc/ssh/conf.d/ dest=/etc/ssh/sshd_config validate='/usr/sbin/sshd -t -f %s'
'''

# ===========================================
# Support method

def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None):
def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False):
    ''' assemble a file from a directory of fragments '''
    tmpfd, temp_path = tempfile.mkstemp()
    tmp = os.fdopen(tmpfd,'w')

@ -105,7 +123,7 @@ def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None):
        if compiled_regexp and not compiled_regexp.search(f):
            continue
        fragment = "%s/%s" % (src_path, f)
        if not os.path.isfile(fragment):
        if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
            continue
        fragment_content = file(fragment).read()

@ -148,6 +166,8 @@ def main():
        backup=dict(default=False, type='bool'),
        remote_src=dict(default=False, type='bool'),
        regexp = dict(required=False),
        ignore_hidden = dict(default=False, type='bool'),
        validate = dict(required=False, type='str'),
    ),
    add_file_common_args=True
)

@ -162,6 +182,8 @@ def main():
    delimiter = module.params['delimiter']
    regexp = module.params['regexp']
    compiled_regexp = None
    ignore_hidden = module.params['ignore_hidden']
    validate = module.params.get('validate', None)

    if not os.path.exists(src):
        module.fail_json(msg="Source (%s) does not exist" % src)

@ -175,7 +197,7 @@ def main():
        except re.error, e:
            module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp))

    path = assemble_from_fragments(src, delimiter, compiled_regexp)
    path = assemble_from_fragments(src, delimiter, compiled_regexp, ignore_hidden)
    path_hash = module.sha1(path)

    if os.path.exists(dest):

@ -184,6 +206,13 @@ def main():
    if path_hash != dest_hash:
        if backup and dest_hash is not None:
            module.backup_local(dest)
        if validate:
            if "%s" not in validate:
                module.fail_json(msg="validate must contain %%s: %s" % validate)
            (rc, out, err) = module.run_command(validate % path)
            if rc != 0:
                module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err))

        shutil.copy(path, dest)
        changed = True
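A minimal sketch of the new ignore_hidden filter: given hypothetical fragment names, only non-hidden regular files are assembled.

import os

fragments = ['00-base.conf', '.10-hidden.conf', '20-extra.conf']
ignore_hidden = True

kept = [f for f in fragments
        if not (ignore_hidden and os.path.basename(f).startswith('.'))]
print(kept)  # ['00-base.conf', '20-extra.conf']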
@ -63,21 +63,13 @@ options:
  force:
    description:
      - the default is C(yes), which will replace the remote file when contents
        are different than the source. If C(no), the file will only be transferred
        if the destination does not exist.
    version_added: "1.1"
    required: false
    choices: [ "yes", "no" ]
    default: "yes"
    aliases: [ "thirsty" ]
  validate:
    description:
      - The validation command to run before copying into place. The path to the file to
        validate is passed in via '%s' which must be present as in the visudo example below.
        The command is passed securely so shell features like expansion and pipes won't work.
    required: false
    default: ""
    version_added: "1.2"
  directory_mode:
    description:
      - When doing a recursive copy set the mode for the directories. If this is not set we will use the system

@ -85,8 +77,10 @@ options:
      already existed.
    required: false
    version_added: "1.5"
extends_documentation_fragment: files
author:
extends_documentation_fragment:
  - files
  - validate
author:
  - "Ansible Core Team"
  - "Michael DeHaan"
notes:

@ -168,7 +162,7 @@ size:
    type: int
    sample: 1220
state:
    description: permissions of the target, after execution
    description: state of the target, after execution
    returned: success
    type: string
    sample: "file"

@ -226,6 +220,7 @@ def main():
    original_basename = module.params.get('original_basename',None)
    validate = module.params.get('validate',None)
    follow = module.params['follow']
    mode = module.params['mode']

    if not os.path.exists(src):
        module.fail_json(msg="Source %s failed to transfer" % (src))

@ -295,6 +290,11 @@ def main():
            os.unlink(dest)
            open(dest, 'w').close()
        if validate:
            # if we have a mode, make sure we set it on the temporary
            # file source as some validations may require it
            # FIXME: should we do the same for owner/group here too?
            if mode is not None:
                module.set_mode_if_different(src, mode, False)
            if "%s" not in validate:
                module.fail_json(msg="validate must contain %%s: %s" % (validate))
            (rc,out,err) = module.run_command(validate % src)
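A minimal sketch of the validate contract shared by copy, assemble, template and lineinfile: the command must contain exactly one '%s', which is replaced by the path of the staged file before it is run. The visudo command line is the module documentation's own example; the temp path is hypothetical.

validate = '/usr/sbin/visudo -cf %s'
staged = '/tmp/ansible-staged-sudoers'

if '%s' not in validate:
    raise ValueError('validate must contain %%s: %s' % validate)
print(validate % staged)
# /usr/sbin/visudo -cf /tmp/ansible-staged-sudoers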
@ -1,5 +1,20 @@
# this is a virtual module that is entirely implemented server side

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: fetch
@ -18,6 +18,7 @@
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import errno
import shutil
import stat
import grp

@ -270,20 +271,30 @@ def main():
                module.exit_json(changed=True)
            changed = True
            curpath = ''
            # Split the path so we can apply filesystem attributes recursively
            # from the root (/) directory for absolute paths or the base path
            # of a relative path.  We can then walk the appropriate directory
            # path to apply attributes.
            for dirname in path.strip('/').split('/'):
                curpath = '/'.join([curpath, dirname])
                # Remove leading slash if we're creating a relative path
                if not os.path.isabs(path):
                    curpath = curpath.lstrip('/')
                if not os.path.exists(curpath):
                    os.mkdir(curpath)
                tmp_file_args = file_args.copy()
                tmp_file_args['path']=curpath
                changed = module.set_fs_attributes_if_different(tmp_file_args, changed)

            try:
                # Split the path so we can apply filesystem attributes recursively
                # from the root (/) directory for absolute paths or the base path
                # of a relative path.  We can then walk the appropriate directory
                # path to apply attributes.
                for dirname in path.strip('/').split('/'):
                    curpath = '/'.join([curpath, dirname])
                    # Remove leading slash if we're creating a relative path
                    if not os.path.isabs(path):
                        curpath = curpath.lstrip('/')
                    if not os.path.exists(curpath):
                        try:
                            os.mkdir(curpath)
                        except OSError, ex:
                            # Possibly something else created the dir since the os.path.exists
                            # check above. As long as it's a dir, we don't need to error out.
                            if not (ex.errno == errno.EEXIST and os.path.isdir(curpath)):
                                raise
                    tmp_file_args = file_args.copy()
                    tmp_file_args['path']=curpath
                    changed = module.set_fs_attributes_if_different(tmp_file_args, changed)
            except Exception, e:
                module.fail_json(path=path, msg='There was an issue creating %s as requested: %s' % (curpath, str(e)))

        # We already know prev_state is not 'absent', therefore it exists in some form.
        elif prev_state != 'directory':
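A minimal sketch of the race-tolerant directory creation added above: if another process creates the directory between the exists() check and mkdir(), the EEXIST error is ignored as long as a directory is actually there. The path is hypothetical.

import errno
import os

def ensure_dir(curpath):
    if not os.path.exists(curpath):
        try:
            os.mkdir(curpath)
        except OSError as ex:
            # Ignore the race only when a directory really exists now.
            if not (ex.errno == errno.EEXIST and os.path.isdir(curpath)):
                raise

ensure_dir('/tmp/ansible-demo-dir')
ensure_dir('/tmp/ansible-demo-dir')  # second call is a no-op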
@ -120,6 +120,9 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese
            if cp.get(section, option):
                cp.remove_option(section, option)
                changed = True
        except ConfigParser.InterpolationError:
            cp.remove_option(section, option)
            changed = True
        except:
            pass

@ -143,6 +146,9 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese
        except ConfigParser.NoOptionError:
            cp.set(section, option, value)
            changed = True
        except ConfigParser.InterpolationError:
            cp.set(section, option, value)
            changed = True

    if changed and not module.check_mode:
        if backup:
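A minimal sketch of why the InterpolationError handlers above are needed, written against Python 3's configparser (the module itself uses Python 2's ConfigParser, where the behaviour is analogous):

import configparser

cp = configparser.ConfigParser()
cp.read_string('[app]\nformat = %(undefined)s\n')
try:
    cp.get('app', 'format')
except configparser.InterpolationError:
    # A '%' value that looks like an interpolation reference raises
    # instead of returning; the module now treats this as "option
    # exists" and proceeds to set/remove it.
    print('interpolation error, handled')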
@ -27,10 +27,12 @@ import tempfile
DOCUMENTATION = """
---
module: lineinfile
author:
  - "Daniel Hokka Zakrissoni (@dhozac)"
  - "Ahti Kitsik (@ahtik)"
extends_documentation_fragment: files
extends_documentation_fragment:
  - files
  - validate
short_description: Ensure a particular line is in a file, or replace an
                   existing line using a back-referenced regular expression.
description:

@ -116,16 +118,6 @@ options:
    description:
      - Create a backup file including the timestamp information so you can
        get the original file back if you somehow clobbered it incorrectly.
  validate:
    required: false
    description:
      - validation to run before copying into place.
        Use %s in the command to indicate the current file to validate.
        The command is passed securely so shell features like
        expansion and pipes won't work.
    required: false
    default: None
    version_added: "1.4"
  others:
    description:
      - All arguments accepted by the M(file) module also work here.

@ -245,8 +237,11 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
            # Don't do backref expansion if not asked.
            new_line = line

        if lines[index[0]] != new_line + os.linesep:
            lines[index[0]] = new_line + os.linesep
        if not new_line.endswith(os.linesep):
            new_line += os.linesep

        if lines[index[0]] != new_line:
            lines[index[0]] = new_line
            msg = 'line replaced'
            changed = True
    elif backrefs:
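A minimal sketch of the line-replacement fix above: the separator is appended only when missing, so a matched line that already ends in os.linesep is no longer compared against a double-terminated string.

import os

def replace_line(lines, idx, new_line):
    if not new_line.endswith(os.linesep):
        new_line += os.linesep
    if lines[idx] != new_line:
        lines[idx] = new_line
        return True
    return False

lines = ['foo' + os.linesep]
print(replace_line(lines, 0, 'foo' + os.linesep))  # False: already identical
print(replace_line(lines, 0, 'bar'))               # True: line replaced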
@ -26,7 +26,9 @@ DOCUMENTATION = """
---
module: replace
author: "Evan Kaufman (@EvanK)"
extends_documentation_fragment: files
extends_documentation_fragment:
  - files
  - validate
short_description: Replace all instances of a particular string in a
                   file using a back-referenced regular expression.
description:

@ -61,12 +63,6 @@ options:
    description:
      - Create a backup file including the timestamp information so you can
        get the original file back if you somehow clobbered it incorrectly.
  validate:
    required: false
    description:
      - validation to run before copying into place
    required: false
    default: None
  others:
    description:
      - All arguments accepted by the M(file) module also work here.
@ -58,6 +58,23 @@ EXAMPLES = '''
- fail: msg="Whoops! file ownership has changed"
  when: st.stat.pw_name != 'root'

# Determine if a path exists and is a symlink. Note that if the path does
# not exist, and we test sym.stat.islnk, it will fail with an error, so we
# must first test whether it is defined.
# Run this to understand the structure; the skipped ones do not pass the
# check performed by 'when'
- stat: path=/path/to/something
  register: sym
- debug: msg="islnk isn't defined (path doesn't exist)"
  when: sym.stat.islnk is not defined
- debug: msg="islnk is defined (path must exist)"
  when: sym.stat.islnk is defined
- debug: msg="Path exists and is a symlink"
  when: sym.stat.islnk is defined and sym.stat.islnk
- debug: msg="Path exists and isn't a symlink"
  when: sym.stat.islnk is defined and sym.stat.islnk == False

# Determine if a path exists and is a directory. Note that we need to test
# both that p.stat.isdir actually exists, and also that it's set to true.
- stat: path=/path/to/something
@ -34,8 +34,8 @@ options:
    required: true
  dest_port:
    description:
      - Port number for ssh on the destination host. The ansible_ssh_port inventory var takes precedence over this value.
    default: 22
      - Port number for ssh on the destination host. Prior to ansible 2.0, the ansible_ssh_port inventory var took precedence over this value.
    default: Value of ansible_ssh_port for this host, remote_port config setting, or 22 if none of those are set
    version_added: "1.5"
  mode:
    description:

@ -158,6 +158,12 @@ options:
    default: no
    required: false
    version_added: "2.0"
  verify_host:
    description:
      - Verify destination host key.
    default: no
    required: false
    version_added: "2.0"
notes:
  - rsync must be installed on both the local and remote machine.
  - Inspect the verbose output to validate the destination user/host/path

@ -227,6 +233,7 @@ def main():
        delete = dict(default='no', type='bool'),
        private_key = dict(default=None),
        rsync_path = dict(default=None),
        _local_rsync_path = dict(default='rsync', type='path'),
        archive = dict(default='yes', type='bool'),
        checksum = dict(default='no', type='bool'),
        compress = dict(default='yes', type='bool'),

@ -244,6 +251,8 @@ def main():
        rsync_opts = dict(type='list'),
        ssh_args = dict(type='str'),
        partial = dict(default='no', type='bool'),
        verify_host = dict(default='no', type='bool'),
        mode = dict(default='push', choices=['push', 'pull']),
    ),
    supports_check_mode = True
)

@ -254,7 +263,7 @@ def main():
    delete = module.params['delete']
    private_key = module.params['private_key']
    rsync_path = module.params['rsync_path']
    rsync = module.params.get('local_rsync_path', 'rsync')
    rsync = module.params.get('_local_rsync_path', 'rsync')
    rsync_timeout = module.params.get('rsync_timeout', 'rsync_timeout')
    archive = module.params['archive']
    checksum = module.params['checksum']

@ -272,6 +281,7 @@ def main():
    group = module.params['group']
    rsync_opts = module.params['rsync_opts']
    ssh_args = module.params['ssh_args']
    verify_host = module.params['verify_host']

    cmd = '%s --delay-updates -F' % rsync
    if compress:

@ -324,10 +334,13 @@ def main():
        else:
            private_key = '-i '+ private_key

        ssh_opts = '-S none'

        if not verify_host:
            ssh_opts = '%s -o StrictHostKeyChecking=no' % ssh_opts

        if ssh_args:
            ssh_opts = '-S none -o StrictHostKeyChecking=no %s' % ssh_args
        else:
            ssh_opts = '-S none -o StrictHostKeyChecking=no'
            ssh_opts = '%s %s' % (ssh_opts, ssh_args)

        if dest_port != 22:
            cmd += " --rsh 'ssh %s %s -o Port=%s'" % (private_key, ssh_opts, dest_port)
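A minimal sketch of the ssh option assembly after the verify_host change: host key checking is only disabled when verification was not requested, and user ssh_args are appended rather than replacing the base options.

def build_ssh_opts(verify_host, ssh_args):
    ssh_opts = '-S none'
    if not verify_host:
        ssh_opts = '%s -o StrictHostKeyChecking=no' % ssh_opts
    if ssh_args:
        ssh_opts = '%s %s' % (ssh_opts, ssh_args)
    return ssh_opts

print(build_ssh_opts(False, None))
# -S none -o StrictHostKeyChecking=no
print(build_ssh_opts(True, '-o Compression=yes'))
# -S none -o Compression=yes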
@ -1,5 +1,20 @@
# this is a virtual module that is entirely implemented server side

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: template
@ -24,13 +39,10 @@ options:
    description:
      - Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path.
    required: true
    default: null
    aliases: []
  dest:
    description:
      - Location to render the template to on the remote machine.
    required: true
    default: null
  backup:
    description:
      - Create a backup file including the timestamp information so you can get

@ -38,22 +50,22 @@ options:
    required: false
    choices: [ "yes", "no" ]
    default: "no"
  validate:
  force:
    description:
      - The validation command to run before copying into place.
      - The path to the file to validate is passed in via '%s' which must be present as in the visudo example below.
      - validation to run before copying into place. The command is passed
        securely so shell features like expansion and pipes won't work.
      - the default is C(yes), which will replace the remote file when contents
        are different than the source. If C(no), the file will only be transferred
        if the destination does not exist.
    required: false
    default: ""
    version_added: "1.2"
    choices: [ "yes", "no" ]
    default: "yes"
notes:
  - "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)."
requirements: []
author:
  - Ansible Core Team
  - Michael DeHaan
extends_documentation_fragment: files
extends_documentation_fragment:
  - files
  - validate
'''

EXAMPLES = '''
@ -83,7 +83,7 @@ EXAMPLES = '''
# Unarchive a file that is already on the remote machine
- unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no

# Unarchive a file that needs to be downloaded
# Unarchive a file that needs to be downloaded (added in 2.0)
- unarchive: src=https://example.com/example.zip dest=/usr/local/bin copy=no
'''

@ -300,6 +300,13 @@ def main():
    if not os.access(src, os.R_OK):
        module.fail_json(msg="Source '%s' not readable" % src)

    # skip working with 0 size archives
    try:
        if os.path.getsize(src) == 0:
            module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src)
    except Exception, e:
        module.fail_json(msg="Source '%s' not readable" % src)

    # is dest OK to receive tar file?
    if not os.path.isdir(dest):
        module.fail_json(msg="Destination '%s' is not a directory" % dest)
@ -1,5 +1,20 @@
# -*- mode: python -*-

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: add_host
@ -1,5 +1,20 @@
# -*- mode: python -*-

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: group_by
@ -46,8 +46,6 @@ options:
    description:
      - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
    required: true
    default: null
    aliases: []
  dest:
    description:
      - absolute path of where to download the file to.

@ -57,7 +55,6 @@ options:
        If C(dest) is a directory, the file will always be
        downloaded (regardless of the force option), but replaced only if the contents changed.
    required: true
    default: null
  force:
    description:
      - If C(yes) and C(dest) is not a directory, will download the file every

@ -75,9 +72,22 @@ options:
      - If a SHA-256 checksum is passed to this parameter, the digest of the
        destination file will be calculated after it is downloaded to ensure
        its integrity and verify that the transfer completed successfully.
        This option is deprecated. Use 'checksum'.
    version_added: "1.3"
    required: false
    default: null
  checksum:
    description:
      - 'If a checksum is passed to this parameter, the digest of the
        destination file will be calculated after it is downloaded to ensure
        its integrity and verify that the transfer completed successfully.
        Format: <algorithm>:<checksum>, e.g.: checksum="sha256:D98291AC[...]B6DC7B97"
        If you worry about portability, only the sha1 algorithm is available
        on all platforms and python versions. The third party hashlib
        library can be installed for access to additional algorithms.'
    version_added: "2.0"
    required: false
    default: null
  use_proxy:
    description:
      - if C(no), it will not use a proxy, even if one is defined in

@ -98,6 +108,12 @@ options:
    required: false
    default: 10
    version_added: '1.8'
  headers:
    description:
      - 'Add custom HTTP headers to a request in the format "key:value,key:value"'
    required: false
    default: null
    version_added: '2.0'
  url_username:
    description:
      - The username for use in HTTP basic authentication. This parameter can be used

@ -106,10 +122,20 @@ options:
    version_added: '1.6'
  url_password:
    description:
      - The password for use in HTTP basic authentication. If the C(url_username)
        parameter is not specified, the C(url_password) parameter will not be used.
    required: false
    version_added: '1.6'
  force_basic_auth:
    version_added: '2.0'
    description:
      - httplib2, the library used by the uri module only sends authentication information when a webservice
        responds to an initial request with a 401 status. Since some basic auth services do not properly
        send a 401, logins will fail. This option forces the sending of the Basic authentication header
        upon initial request.
    required: false
    choices: [ "yes", "no" ]
    default: "no"
  others:
    description:
      - all arguments accepted by the M(file) module also work here

@ -123,18 +149,19 @@ EXAMPLES='''
- name: download foo.conf
  get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf mode=0440

- name: download file with sha256 check
  get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf sha256sum=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
- name: download file and force basic auth
  get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf force_basic_auth=yes

- name: download file with custom HTTP headers
  get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf headers='key:value,key:value'

- name: download file with check
  get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf checksum=sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
  get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf checksum=md5:66dffb5228a211e61d6d7ef4a86f5758
'''
import urlparse

try:
    import hashlib
    HAS_HASHLIB=True
except ImportError:
    HAS_HASHLIB=False

# ==============================================================
# url handling

@ -144,14 +171,14 @@ def url_filename(url):
        return 'index.html'
    return fn

def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10):
def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None):
    """
    Download data from the url and store in a temporary file.

    Return (tempfile, info about the request)
    """

    rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout)
    rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, headers=headers)

    if info['status'] == 304:
        module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''))
@ -190,6 +217,7 @@ def extract_filename_from_headers(headers):

    return res


# ==============================================================
# main

@ -200,7 +228,9 @@ def main():
        url = dict(required=True),
        dest = dict(required=True),
        sha256sum = dict(default=''),
        checksum = dict(default=''),
        timeout = dict(required=False, type='int', default=10),
        headers = dict(required=False, default=None),
    )

    module = AnsibleModule(

@ -213,14 +243,54 @@ def main():
    dest = os.path.expanduser(module.params['dest'])
    force = module.params['force']
    sha256sum = module.params['sha256sum']
    checksum = module.params['checksum']
    use_proxy = module.params['use_proxy']
    timeout = module.params['timeout']

    # Parse headers to dict
    if module.params['headers']:
        try:
            headers = dict(item.split(':') for item in module.params['headers'].split(','))
        except:
            module.fail_json(msg="The header parameter requires a key:value,key:value syntax to be properly parsed.")
    else:
        headers = None

    dest_is_dir = os.path.isdir(dest)
    last_mod_time = None

    # workaround for usage of deprecated sha256sum parameter
    if sha256sum != '':
        checksum = 'sha256:%s' % (sha256sum)

    # checksum specified, parse for algorithm and checksum
    if checksum != '':
        try:
            algorithm, checksum = checksum.rsplit(':', 1)
            # Remove any non-alphanumeric characters, including the infamous
            # Unicode zero-width space
            checksum = re.sub(r'\W+', '', checksum).lower()
            # Ensure the checksum portion is a hexdigest
            int(checksum, 16)
        except ValueError:
            module.fail_json(msg="The checksum parameter has to be in format <algorithm>:<checksum>")

    if not dest_is_dir and os.path.exists(dest):
        if not force:
        checksum_mismatch = False

        # If the download is not forced and there is a checksum, allow
        # checksum match to skip the download.
        if not force and checksum != '':
            destination_checksum = module.digest_from_file(dest, algorithm)

            if checksum == destination_checksum:
                module.exit_json(msg="file already exists", dest=dest, url=url, changed=False)

            checksum_mismatch = True

        # Not forcing redownload, unless checksum does not match
        if not force and not checksum_mismatch:
            module.exit_json(msg="file already exists", dest=dest, url=url, changed=False)

    # If the file already exists, prepare the last modified time for the
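A minimal sketch of the two new parameter parsers above, run on hypothetical inputs:

import re

# headers: "key:value,key:value" becomes a dict for fetch_url.
raw_headers = 'X-Token:abc123,Accept:application/json'
headers = dict(item.split(':') for item in raw_headers.split(','))
print(headers)  # {'X-Token': 'abc123', 'Accept': 'application/json'}

# checksum: "<algorithm>:<hexdigest>" is split from the right, stray
# non-alphanumeric characters (e.g. a pasted zero-width space) are
# stripped, and the remainder must parse as hexadecimal.
algorithm, checksum = 'sha256:B5BB9D8014A0F9B1'.rsplit(':', 1)
checksum = re.sub(r'\W+', '', checksum).lower()
int(checksum, 16)  # raises ValueError if not a hexdigest
print(algorithm, checksum)  # sha256 b5bb9d8014a0f9b1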
@ -229,7 +299,7 @@ def main():
        last_mod_time = datetime.datetime.utcfromtimestamp(mtime)

    # download to tmpsrc
    tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout)
    tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers)

    # Now the request has completed, we can finally generate the final
    # destination file name from the info dict.

@ -280,22 +350,12 @@ def main():
    else:
        changed = False

    # Check the digest of the destination file and ensure that it matches the
    # sha256sum parameter if it is present
    if sha256sum != '':
        # Remove any non-alphanumeric characters, including the infamous
        # Unicode zero-width space
        stripped_sha256sum = re.sub(r'\W+', '', sha256sum)
    if checksum != '':
        destination_checksum = module.digest_from_file(dest, algorithm)

        if not HAS_HASHLIB:
        if checksum != destination_checksum:
            os.remove(dest)
            module.fail_json(msg="The sha256sum parameter requires hashlib, which is available in Python 2.5 and higher")
        else:
            destination_checksum = module.sha256(dest)

        if stripped_sha256sum.lower() != destination_checksum:
            os.remove(dest)
            module.fail_json(msg="The SHA-256 checksum for %s did not match %s; it was %s." % (dest, sha256sum, destination_checksum))
            module.fail_json(msg="The checksum for %s did not match %s; it was %s." % (dest, checksum, destination_checksum))

    os.remove(tmpsrc)

@ -312,9 +372,8 @@ def main():
    md5sum = None

    # Mission complete

    module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum, checksum=checksum_src,
                     sha256sum=sha256sum, changed=changed, msg=info.get('msg', ''))
    module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum, checksum_src=checksum_src,
                     checksum_dest=checksum_dest, changed=changed, msg=info.get('msg', ''))

# import module snippets
from ansible.module_utils.basic import *
@ -71,11 +71,12 @@ options:
    required: false
    choices: [ "raw", "json" ]
    default: raw
    version_added: "2.0"
  method:
    description:
      - The HTTP method of the request or response.
    required: false
    choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH" ]
    choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH", "TRACE", "CONNECT", "REFRESH" ]
    default: "GET"
  return_content:
    description:

@ -269,7 +270,7 @@ def url_filename(url):

def uri(module, url, dest, user, password, body, body_format, method, headers, redirects, socket_timeout, validate_certs):
    # To debug
    #httplib2.debug = 4
    #httplib2.debuglevel = 4

    # Handle Redirects
    if redirects == "all" or redirects == "yes":

@ -367,7 +368,7 @@ def main():
        password = dict(required=False, default=None),
        body = dict(required=False, default=None),
        body_format = dict(required=False, default='raw', choices=['raw', 'json']),
        method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH']),
        method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH', 'TRACE', 'CONNECT', 'REFRESH']),
        return_content = dict(required=False, default='no', type='bool'),
        force_basic_auth = dict(required=False, default='no', type='bool'),
        follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']),
@ -84,7 +84,7 @@ options:
      - Allow adding build flags for gem compilation
    required: false
    version_added: "2.0"
author:
  - "Ansible Core Team"
  - "Johan Wiren"
'''

@ -196,8 +196,11 @@ def install(module):
    if module.params['pre_release']:
        cmd.append('--pre')
    if not module.params['include_doc']:
        cmd.append('--no-rdoc')
        cmd.append('--no-ri')
        if major and major < 2:
            cmd.append('--no-rdoc')
            cmd.append('--no-ri')
        else:
            cmd.append('--no-document')
    cmd.append(module.params['gem_source'])
    if module.params['build_flags']:
        cmd.extend([ '--', module.params['build_flags'] ])
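A minimal sketch of the version-dependent flag selection added above: newer RubyGems versions use --no-document in place of the older --no-rdoc/--no-ri pair. Here 'major' stands in for the detected RubyGems major version.

def doc_flags(major):
    cmd = []
    if major and major < 2:
        cmd.append('--no-rdoc')
        cmd.append('--no-ri')
    else:
        cmd.append('--no-document')
    return cmd

print(doc_flags(1))  # ['--no-rdoc', '--no-ri']
print(doc_flags(2))  # ['--no-document']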
@ -63,13 +63,21 @@ options:
    default: "no"
    choices: [ "yes", "no" ]
  virtualenv_command:
    version_aded: "1.1"
    version_added: "1.1"
    description:
      - The command or a pathname to the command to create the virtual
        environment with. For example C(pyvenv), C(virtualenv),
        C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv).
    required: false
    default: virtualenv
  virtualenv_python:
    version_added: "2.0"
    description:
      - The Python executable used for creating the virtual environment.
        For example C(python3.4), C(python2.7). When not specified, the
        system Python version is used.
    required: false
    default: null
  state:
    description:
      - The state of module
@ -147,7 +155,7 @@ def _get_cmd_options(module, cmd):
    words = stdout.strip().split()
    cmd_options = [ x for x in words if x.startswith('--') ]
    return cmd_options


def _get_full_name(name, version=None):
    if version is None:

@ -228,6 +236,7 @@ def main():
        virtualenv=dict(default=None, required=False),
        virtualenv_site_packages=dict(default='no', type='bool'),
        virtualenv_command=dict(default='virtualenv', required=False),
        virtualenv_python=dict(default=None, required=False, type='str'),
        use_mirrors=dict(default='yes', type='bool'),
        extra_args=dict(default=None, required=False),
        chdir=dict(default=None, required=False),

@ -243,6 +252,7 @@ def main():
    version = module.params['version']
    requirements = module.params['requirements']
    extra_args = module.params['extra_args']
    virtualenv_python = module.params['virtualenv_python']
    chdir = module.params['chdir']

    if state == 'latest' and version is not None:

@ -260,18 +270,21 @@ def main():
        if module.check_mode:
            module.exit_json(changed=True)

        virtualenv = os.path.expanduser(virtualenv_command)
        if os.path.basename(virtualenv) == virtualenv:
            virtualenv = module.get_bin_path(virtualenv_command, True)
        cmd = os.path.expanduser(virtualenv_command)
        if os.path.basename(cmd) == cmd:
            cmd = module.get_bin_path(virtualenv_command, True)

        if module.params['virtualenv_site_packages']:
            cmd = '%s --system-site-packages %s' % (virtualenv, env)
            cmd += ' --system-site-packages'
        else:
            cmd_opts = _get_cmd_options(module, virtualenv)
            cmd_opts = _get_cmd_options(module, cmd)
            if '--no-site-packages' in cmd_opts:
                cmd = '%s --no-site-packages %s' % (virtualenv, env)
            else:
                cmd = '%s %s' % (virtualenv, env)
                cmd += ' --no-site-packages'

        if virtualenv_python:
            cmd += ' -p%s' % virtualenv_python

        cmd = "%s %s" % (cmd, env)
        this_dir = tempfile.gettempdir()
        if chdir:
            this_dir = os.path.join(this_dir, chdir)
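A simplified sketch of the refactored virtualenv command assembly (it omits the --no-site-packages probing): flags are appended to the command string and the environment path is added once at the end. The values are hypothetical.

def build_virtualenv_cmd(base, env, site_packages, virtualenv_python):
    cmd = base
    if site_packages:
        cmd += ' --system-site-packages'
    if virtualenv_python:
        cmd += ' -p%s' % virtualenv_python
    return '%s %s' % (cmd, env)

print(build_virtualenv_cmd('/usr/bin/virtualenv', '/opt/venv',
                           False, 'python3.4'))
# /usr/bin/virtualenv -ppython3.4 /opt/venv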
@ -286,14 +299,14 @@ def main():
|
|||
cmd = '%s %s' % (pip, state_map[state])
|
||||
|
||||
# If there's a virtualenv we want things we install to be able to use other
|
||||
# installations that exist as binaries within this virtualenv. Example: we
|
||||
# install cython and then gevent -- gevent needs to use the cython binary,
|
||||
# not just a python package that will be found by calling the right python.
|
||||
# installations that exist as binaries within this virtualenv. Example: we
|
||||
# install cython and then gevent -- gevent needs to use the cython binary,
|
||||
# not just a python package that will be found by calling the right python.
|
||||
# So if there's a virtualenv, we add that bin/ to the beginning of the PATH
|
||||
# in run_command by setting path_prefix here.
|
||||
path_prefix = None
|
||||
if env:
|
||||
path_prefix="/".join(pip.split('/')[:-1])
|
||||
path_prefix = "/".join(pip.split('/')[:-1])
|
||||
|
||||
# Automatically apply -e option to extra_args when source is a VCS url. VCS
|
||||
# includes those beginning with svn+, git+, hg+ or bzr+
|
||||
|
@ -320,7 +333,7 @@ def main():
|
|||
this_dir = os.path.join(this_dir, chdir)
|
||||
|
||||
if module.check_mode:
|
||||
if env or extra_args or requirements or state == 'latest' or not name:
|
||||
if extra_args or requirements or state == 'latest' or not name:
|
||||
module.exit_json(changed=True)
|
||||
elif name.startswith('svn+') or name.startswith('git+') or \
|
||||
name.startswith('hg+') or name.startswith('bzr+'):
|
||||
|
@ -343,7 +356,8 @@ def main():
|
|||
rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=this_dir)
|
||||
out += out_pip
|
||||
err += err_pip
|
||||
if rc == 1 and state == 'absent' and 'not installed' in out_pip:
|
||||
if rc == 1 and state == 'absent' and \
|
||||
('not installed' in out_pip or 'not installed' in err_pip):
|
||||
pass # rc is 1 when attempting to uninstall non-installed package
|
||||
elif rc != 0:
|
||||
_fail(module, cmd, out, err)
|
||||
|
@ -354,7 +368,8 @@ def main():
|
|||
changed = 'Successfully installed' in out_pip
|
||||
|
||||
module.exit_json(changed=changed, cmd=cmd, name=name, version=version,
|
||||
state=state, requirements=requirements, virtualenv=env, stdout=out, stderr=err)
|
||||
state=state, requirements=requirements, virtualenv=env,
|
||||
stdout=out, stderr=err)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
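The two patterns this pip refactor introduces are easier to see outside the diff. A minimal sketch, assuming the same names as the module (virtualenv_command, env, pip); the helper name and the pip path are illustrative only:

import os

# Hypothetical condensation of the command building above: flags are appended
# to the virtualenv executable one at a time, and the target directory (env)
# is appended last instead of being interpolated mid-string.
def build_virtualenv_cmd(virtualenv_command, env, site_packages, python=None):
    cmd = os.path.expanduser(virtualenv_command)
    if site_packages:
        cmd += ' --system-site-packages'
    if python:
        cmd += ' -p%s' % python
    return "%s %s" % (cmd, env)

# path_prefix is simply the bin/ directory that contains the virtualenv's
# pip, so binaries installed earlier in the play (e.g. cython) are found first.
pip = '/srv/venv/bin/pip'                      # illustrative path
path_prefix = "/".join(pip.split('/')[:-1])    # -> '/srv/venv/bin'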
41
packaging/os/apt.py
Normal file → Executable file
@@ -80,8 +80,8 @@ options:
- 'Note: This does not upgrade a specific package, use state=latest for that.'
version_added: "1.1"
required: false
default: "yes"
choices: [ "yes", "safe", "full", "dist"]
default: "no"
choices: [ "no", "yes", "safe", "full", "dist"]
dpkg_options:
description:
- Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'

@@ -179,8 +179,8 @@ APT_ENV_VARS = dict(
)

DPKG_OPTIONS = 'force-confdef,force-confold'
APT_GET_ZERO = "0 upgraded, 0 newly installed"
APTITUDE_ZERO = "0 packages upgraded, 0 newly installed"
APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed"
APT_LISTS_PATH = "/var/lib/apt/lists"
APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
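Why the leading "\n" matters: apt-get prints summary lines such as "12 upgraded, 0 newly installed ...", and the old marker "0 upgraded, 0 newly installed" is a substring of "10 upgraded, 0 newly installed", so a real upgrade could be misreported as a no-op. Pinning the marker to a line start removes that false match. A small sketch (the function name is illustrative; out is assumed to hold captured apt-get stdout):

APT_GET_ZERO = "\n0 upgraded, 0 newly installed"

def apt_made_changes(out):
    # Because the marker begins with '\n' it can only match at the start of
    # a line, so "10 upgraded, 0 newly installed" no longer counts as zero.
    return APT_GET_ZERO not in out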
@@ -230,10 +230,10 @@ def package_status(m, pkgname, version, cache, state):
try:
provided_packages = cache.get_providing_packages(pkgname)
if provided_packages:
is_installed = False
is_installed = False
# when virtual package providing only one package, look up status of target package
if cache.is_virtual_package(pkgname) and len(provided_packages) == 1:
package = provided_packages[0]
package = provided_packages[0]
installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install')
if installed:
is_installed = True

@@ -403,19 +403,20 @@ def install_deb(m, debs, cache, force, install_recommends, dpkg_options):
for deb_file in debs.split(','):
try:
pkg = apt.debfile.DebPackage(deb_file)
except SystemError, e:
m.fail_json(msg="System Error: %s" % str(e))

# Check if it's already installed
if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME:
continue
# Check if package is installable
if not pkg.check() and not force:
m.fail_json(msg=pkg._failure_string)
# Check if it's already installed
if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME:
continue
# Check if package is installable
if not pkg.check() and not force:
m.fail_json(msg=pkg._failure_string)

# add any missing deps to the list of deps we need
# to install so they're all done in one shot
deps_to_install.extend(pkg.missing_deps)
# add any missing deps to the list of deps we need
# to install so they're all done in one shot
deps_to_install.extend(pkg.missing_deps)

except Exception, e:
m.fail_json(msg="Unable to install package: %s" % str(e))

# and add this deb to the list of packages to install
pkgs_to_install.append(deb_file)

@@ -548,7 +549,7 @@ def main():
default_release = dict(default=None, aliases=['default-release']),
install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'),
force = dict(default='no', type='bool'),
upgrade = dict(choices=['yes', 'safe', 'full', 'dist']),
upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']),
dpkg_options = dict(default=DPKG_OPTIONS)
),
mutually_exclusive = [['package', 'upgrade', 'deb']],

@@ -572,6 +573,10 @@ def main():
APT_GET_CMD = module.get_bin_path("apt-get")

p = module.params

if p['upgrade'] == 'no':
p['upgrade'] = None

if not APTITUDE_CMD and p.get('upgrade', None) in [ 'full', 'safe', 'yes' ]:
module.fail_json(msg="Could not find aptitude. Please ensure it is installed.")
@@ -124,7 +124,8 @@ class InvalidSource(Exception):
# Simple version of aptsources.sourceslist.SourcesList.
# No advanced logic and no backups inside.
class SourcesList(object):
def __init__(self):
def __init__(self, module):
self.module = module
self.files = {} # group sources by file
# Repositories that we're adding -- used to implement mode param
self.new_repos = set()

@@ -234,7 +235,7 @@ class SourcesList(object):
group.append((n, valid, enabled, source, comment))
self.files[file] = group

def save(self, module):
def save(self):
for filename, sources in self.files.items():
if sources:
d, fn = os.path.split(filename)

@@ -255,13 +256,13 @@ class SourcesList(object):
try:
f.write(line)
except IOError, err:
module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, unicode(err)))
module.atomic_move(tmp_path, filename)
self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, unicode(err)))
self.module.atomic_move(tmp_path, filename)

# allow the user to override the default mode
if filename in self.new_repos:
this_mode = module.params['mode']
module.set_mode_if_different(filename, this_mode, False)
this_mode = self.module.params['mode']
self.module.set_mode_if_different(filename, this_mode, False)
else:
del self.files[filename]
if os.path.exists(filename):

@@ -329,7 +330,7 @@ class UbuntuSourcesList(SourcesList):
def __init__(self, module, add_ppa_signing_keys_callback=None):
self.module = module
self.add_ppa_signing_keys_callback = add_ppa_signing_keys_callback
super(UbuntuSourcesList, self).__init__()
super(UbuntuSourcesList, self).__init__(module)

def _get_ppa_info(self, owner_name, ppa_name):
lp_api = self.LP_API % (owner_name, ppa_name)

@@ -359,6 +360,10 @@ class UbuntuSourcesList(SourcesList):
if line.startswith('ppa:'):
source, ppa_owner, ppa_name = self._expand_ppa(line)

if source in self.repos_urls:
# repository already exists
return

if self.add_ppa_signing_keys_callback is not None:
info = self._get_ppa_info(ppa_owner, ppa_name)
if not self._key_already_exists(info['signing_key_fingerprint']):

@@ -378,6 +383,25 @@ class UbuntuSourcesList(SourcesList):
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
self._remove_valid_source(source)

@property
def repos_urls(self):
_repositories = []
for parsed_repos in self.files.values():
for parsed_repo in parsed_repos:
enabled = parsed_repo[1]
source_line = parsed_repo[3]

if not enabled:
continue

if source_line.startswith('ppa:'):
source, ppa_owner, ppa_name = self._expand_ppa(source_line)
_repositories.append(source)
else:
_repositories.append(source_line)

return _repositories


def get_add_ppa_signing_key_callback(module):
def _run_command(command):

@@ -404,24 +428,24 @@ def main():
)

params = module.params
if params['install_python_apt'] and not HAVE_PYTHON_APT and not module.check_mode:
install_python_apt(module)

repo = module.params['repo']
state = module.params['state']
update_cache = module.params['update_cache']
sourceslist = None

if HAVE_PYTHON_APT:
if isinstance(distro, aptsources_distro.UbuntuDistribution):
sourceslist = UbuntuSourcesList(module,
add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module))
elif HAVE_PYTHON_APT and \
isinstance(distro, aptsources_distro.DebianDistribution) or isinstance(distro, aptsources_distro.Distribution):
sourceslist = SourcesList()
if not HAVE_PYTHON_APT:
if params['install_python_apt']:
install_python_apt(module)
else:
module.fail_json(msg='python-apt is not installed, and install_python_apt is False')

if isinstance(distro, aptsources_distro.UbuntuDistribution):
sourceslist = UbuntuSourcesList(module,
add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module))
elif isinstance(distro, aptsources_distro.Distribution):
sourceslist = SourcesList(module)
else:
module.fail_json(msg='Module apt_repository supports only Debian and Ubuntu. ' + \
'You may be seeing this because python-apt is not installed, but you requested that it not be auto-installed')
module.fail_json(msg='Module apt_repository supports only Debian and Ubuntu.')

sources_before = sourceslist.dump()

@@ -438,7 +462,7 @@ def main():

if not module.check_mode and changed:
try:
sourceslist.save(module)
sourceslist.save()
if update_cache:
cache = apt.Cache()
cache.update()
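The rewritten main() replaces the tangled isinstance chain with a guard clause for the python-apt dependency. A hedged sketch of the pattern (HAVE_PYTHON_APT and install_python_apt as in the diff; the helper name is illustrative):

def ensure_python_apt(module, params):
    # Guard-clause pattern from the rewrite: bootstrap the dependency when
    # the user allows it, otherwise fail with an actionable message before
    # any SourcesList object is built.
    if HAVE_PYTHON_APT:
        return
    if params['install_python_apt']:
        install_python_apt(module)
    else:
        module.fail_json(msg='python-apt is not installed, and install_python_apt is False')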
@@ -23,19 +23,29 @@ DOCUMENTATION = '''
---
module: package
version_added: 2.0
author: Ansible Core Team
author:
- Ansible Inc
maintainers:
- Ansible Core Team
short_description: Generic OS package manager
description:
- Installs, upgrades and removes packages using the underlying OS package manager.
options:
name:
description:
- "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: yum -y update. You can also pass a url or a local path to a rpm file. To operate on several packages this can accept a comma separated list of packages or (as of 2.0) a list of packages."
- "Package name, or package specifier with version, like C(name-1.0)."
- "Be aware that packages are not always named the same and this module will not 'translate' them per distro."
required: true
state:
description:
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
required: true
use:
description:
- The required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it.
- You should only use this field if the automatic selection is not working for some reason.
required: false
default: auto
requirements:
- Whatever is required for the package plugins specific for each system.
notes:
@@ -76,6 +76,12 @@ EXAMPLES = '''
- redhat_subscription: state=present
activationkey=1-222333444
pool='^(Red Hat Enterprise Server|Red Hat Virtualization)$'

# Update the consumed subscriptions from the previous example (remove the Red
# Hat Virtualization subscription)
- redhat_subscription: state=present
activationkey=1-222333444
pool='^Red Hat Enterprise Server$'
'''

import os

@@ -180,7 +186,7 @@ class Rhsm(RegistrationBase):
for k,v in kwargs.items():
if re.search(r'^(system|rhsm)_', k):
args.append('--%s=%s' % (k.replace('_','.'), v))

self.module.run_command(args, check_rc=True)

@property

@@ -226,14 +232,26 @@ class Rhsm(RegistrationBase):

rc, stderr, stdout = self.module.run_command(args, check_rc=True)

def unsubscribe(self):
def unsubscribe(self, serials=None):
'''
Unsubscribe a system from all subscribed channels
Unsubscribe a system from subscribed channels
Args:
serials(list or None): list of serials to unsubscribe. If
serials is none or an empty list, then
all subscribed channels will be removed.
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'unsubscribe', '--all']
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
items = []
if serials is not None and serials:
items = ["--serial=%s" % s for s in serials]
if serials is None:
items = ["--all"]

if items:
args = ['subscription-manager', 'unsubscribe'] + items
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
return serials

def unregister(self):
'''

@@ -255,8 +273,27 @@ class Rhsm(RegistrationBase):
# Available pools ready for subscription
available_pools = RhsmPools(self.module)

subscribed_pool_ids = []
for pool in available_pools.filter(regexp):
pool.subscribe()
subscribed_pool_ids.append(pool.get_pool_id())
return subscribed_pool_ids

def update_subscriptions(self, regexp):
changed=False
consumed_pools = RhsmPools(self.module, consumed=True)
pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter(regexp)]

serials_to_remove=[p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]
serials = self.unsubscribe(serials=serials_to_remove)

subscribed_pool_ids = self.subscribe(regexp)

if subscribed_pool_ids or serials:
changed=True
return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,
'unsubscribed_serials': serials}
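The serials argument is deliberately three-valued; condensed as a sketch (the helper name is illustrative, the flags come from the code above):

def build_unsubscribe_args(serials=None):
    # Mirrors unsubscribe() above: None means "remove every subscription",
    # an empty list means "remove nothing", and a populated list is removed
    # serial by serial.
    if serials is None:
        return ['subscription-manager', 'unsubscribe', '--all']
    if not serials:
        return None  # nothing to run
    return ['subscription-manager', 'unsubscribe'] + ['--serial=%s' % s for s in serials]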


class RhsmPool(object):

@@ -272,8 +309,11 @@ class RhsmPool(object):
def __str__(self):
return str(self.__getattribute__('_name'))

def get_pool_id(self):
return getattr(self, 'PoolId', getattr(self, 'PoolID'))

def subscribe(self):
args = "subscription-manager subscribe --pool %s" % self.PoolId
args = "subscription-manager subscribe --pool %s" % self.get_pool_id()
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
if rc == 0:
return True
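get_pool_id() papers over subscription-manager versions that label the column PoolId versus PoolID. Note that in getattr(self, 'PoolId', getattr(self, 'PoolID')) the fallback is evaluated before the call, so an object carrying only PoolId would still raise AttributeError. A defensive variant, as a sketch (the None default is an assumption; the attribute names come from the diff):

def get_pool_id(pool):
    # Try both spellings without evaluating the fallback eagerly.
    pool_id = getattr(pool, 'PoolId', None)
    if pool_id is None:
        pool_id = getattr(pool, 'PoolID', None)
    return pool_id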
@@ -285,18 +325,25 @@ class RhsmPools(object):
"""
This class is used for manipulating pools subscriptions with RHSM
"""
def __init__(self, module):
def __init__(self, module, consumed=False):
self.module = module
self.products = self._load_product_list()
self.products = self._load_product_list(consumed)

def __iter__(self):
return self.products.__iter__()

def _load_product_list(self):
def _load_product_list(self, consumed=False):
"""
Loads list of all available pools for system in data structure
Loads list of all available or consumed pools for system in data structure

Args:
consumed(bool): if True list consumed pools, else list available pools (default False)
"""
args = "subscription-manager list --available"
args = "subscription-manager list"
if consumed:
args += " --consumed"
else:
args += " --available"
rc, stdout, stderr = self.module.run_command(args, check_rc=True)

products = []

@@ -375,18 +422,27 @@ def main():

# Register system
if rhn.is_registered:
module.exit_json(changed=False, msg="System already registered.")
if pool != '^$':
try:
result = rhn.update_subscriptions(pool)
except Exception, e:
module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, e))
else:
module.exit_json(**result)
else:
module.exit_json(changed=False, msg="System already registered.")
else:
try:
rhn.enable()
rhn.configure(**module.params)
rhn.register(username, password, autosubscribe, activationkey, org_id)
rhn.subscribe(pool)
subscribed_pool_ids = rhn.subscribe(pool)
except Exception, e:
module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, e))
else:
module.exit_json(changed=True, msg="System successfully registered to '%s'." % server_hostname)

module.exit_json(changed=True,
msg="System successfully registered to '%s'." % server_hostname,
subscribed_pool_ids=subscribed_pool_ids)
# Ensure system is *not* registered
if state == 'absent':
if not rhn.is_registered:
@@ -56,6 +56,12 @@ options:
- supply an activation key for use with registration
required: False
default: null
profilename:
description:
- supply a profilename for use with registration
required: False
default: null
version_added: "2.0"
channels:
description:
- Optionally specify a list of comma-separated channels to subscribe to upon successful registration.

@@ -73,6 +79,9 @@ EXAMPLES = '''
# Register with activationkey (1-222333444) and enable extended update support.
- rhn_register: state=present activationkey=1-222333444 enable_eus=true

# Register with activationkey (1-222333444) and set a profilename which may differ from the hostname.
- rhn_register: state=present activationkey=1-222333444 profilename=host.example.com.custom

# Register as user (joe_user) with password (somepass) against a satellite
# server specified by (server_url).
- rhn_register: >

@@ -209,7 +218,7 @@ class Rhn(RegistrationBase):
self.update_plugin_conf('rhnplugin', True)
self.update_plugin_conf('subscription-manager', False)

def register(self, enable_eus=False, activationkey=None):
def register(self, enable_eus=False, activationkey=None, profilename=None):
'''
Register system to RHN. If enable_eus=True, extended update
support will be requested.

@@ -221,7 +230,8 @@ class Rhn(RegistrationBase):
register_cmd += " --use-eus-channel"
if activationkey is not None:
register_cmd += " --activationkey '%s'" % activationkey
# FIXME - support --profilename
if profilename is not None:
register_cmd += " --profilename '%s'" % profilename
# FIXME - support --systemorgid
rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True)

@@ -285,6 +295,7 @@ def main():
password = dict(default=None, required=False),
server_url = dict(default=rhn.config.get_option('serverURL'), required=False),
activationkey = dict(default=None, required=False),
profilename = dict(default=None, required=False),
enable_eus = dict(default=False, type='bool'),
channels = dict(default=[], type='list'),
)

@@ -295,6 +306,7 @@ def main():
rhn.password = module.params['password']
rhn.configure(module.params['server_url'])
activationkey = module.params['activationkey']
profilename = module.params['profilename']
channels = module.params['channels']
rhn.module = module
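register() builds a shell string and runs it with use_unsafe_shell=True; the single quotes around each value are what keep that workable. A hypothetical argv-based variant that sidesteps shell quoting entirely (the rhnreg_ks path is an assumption; the flags come from the diff):

def build_register_cmd(activationkey=None, profilename=None, enable_eus=False):
    cmd = ['/usr/sbin/rhnreg_ks']  # assumed location, for illustration only
    if enable_eus:
        cmd.append('--use-eus-channel')
    if activationkey is not None:
        cmd.extend(['--activationkey', activationkey])
    if profilename is not None:
        cmd.extend(['--profilename', profilename])
    return cmd  # pass to module.run_command() without use_unsafe_shell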


@@ -141,7 +141,14 @@ class RpmKey:
return ret

def getkeyid(self, keyfile):
gpg = self.module.get_bin_path('gpg', True)

gpg = self.module.get_bin_path('gpg')
if not gpg:
gpg = self.module.get_bin_path('gpg2')

if not gpg:
self.json_fail(msg="rpm_key requires a command line gpg or gpg2, none found")

stdout, stderr = self.execute_command([gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', '--list-packets', keyfile])
for line in stdout.splitlines():
line = line.strip()
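Dropping the required=True from the first get_bin_path call is what lets gpg2-only distributions work. The lookup as a standalone sketch (the diff calls self.json_fail; module.fail_json is used here so the sketch is self-contained):

def find_gpg(module):
    # Prefer 'gpg', fall back to 'gpg2', and only fail once both lookups
    # come back empty.
    gpg = module.get_bin_path('gpg')
    if not gpg:
        gpg = module.get_bin_path('gpg2')
    if not gpg:
        module.fail_json(msg="rpm_key requires a command line gpg or gpg2, none found")
    return gpg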


@@ -118,10 +118,22 @@ options:
choices: ["yes", "no"]
aliases: []

notes: []
notes:
- When used with a loop of package names in a playbook, ansible optimizes
the call to the yum module. Instead of calling the module with a single
package each time through the loop, ansible calls the module once with all
of the package names from the loop.
- In versions prior to 1.9.2 this module installed and removed each package
given to the yum module separately. This caused problems when packages
specified by filename or url had to be installed or removed together. In
1.9.2 this was fixed so that packages are installed in one yum
transaction. However, if one of the packages adds a new yum repository
that the other packages come from (such as epel-release) then that package
needs to be installed in a separate task. This mimics yum's command line
behaviour.
# informational: requirements for nodes
requirements: [ yum ]
author:
author:
- "Ansible Core Team"
- "Seth Vidal"
'''

@@ -212,7 +224,7 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di
for rid in en_repos:
my.repos.enableRepo(rid)

e,m,u = my.rpmdb.matchPackageNames([pkgspec])
e, m, u = my.rpmdb.matchPackageNames([pkgspec])
pkgs = e + m
if not pkgs:
pkgs.extend(my.returnInstalledPackagesByDep(pkgspec))

@@ -224,16 +236,16 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di
else:

cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, pkgspec]
rc,out,err = module.run_command(cmd)
rc, out, err = module.run_command(cmd)
if not is_pkg:
cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, "--whatprovides", pkgspec]
rc2,out2,err2 = module.run_command(cmd)
rc2, out2, err2 = module.run_command(cmd)
else:
rc2,out2,err2 = (0, '', '')
rc2, out2, err2 = (0, '', '')

if rc == 0 and rc2 == 0:
out += out2
return [ p for p in out.split('\n') if p.strip() ]
return [p for p in out.split('\n') if p.strip()]
else:
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))

@@ -541,7 +553,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
module.fail_json(msg="Failure downloading %s, %s" % (spec, e))

#groups :(
elif spec.startswith('@'):
elif spec.startswith('@'):
# complete wild ass guess b/c it's a group
pkg = spec

@@ -608,7 +620,8 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
shutil.rmtree(tempdir)
except Exception, e:
module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e))
module.exit_json(changed=True)

module.exit_json(changed=True, results=res['results'], changes=dict(installed=pkgs))

changed = True

@@ -676,7 +689,7 @@ def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
cmd = yum_basecmd + ["remove"] + pkgs

if module.check_mode:
module.exit_json(changed=True)
module.exit_json(changed=True, results=res['results'], changes=dict(removed=pkgs))

rc, out, err = module.run_command(cmd)

@@ -711,47 +724,69 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
res['msg'] = ''
res['changed'] = False
res['rc'] = 0
pkgs = {}
pkgs['update'] = []
pkgs['install'] = []
updates = {}
update_all = False
cmd = None

for spec in items:
# determine if we're doing an update all
if '*' in items:
update_all = True

pkg = None
basecmd = 'update'
cmd = ''
# groups, again
if spec.startswith('@'):
pkg = spec

elif spec == '*': #update all
# use check-update to see if there is any need
rc,out,err = module.run_command(yum_basecmd + ['check-update'])
if rc == 100:
cmd = yum_basecmd + [basecmd]
else:
res['results'].append('All packages up to date')
# run check-update to see if we have packages pending
rc, out, err = module.run_command(yum_basecmd + ['check-update'])
if rc == 0 and update_all:
res['results'].append('Nothing to do here, all packages are up to date')
return res
elif rc == 100:
available_updates = out.split('\n')
# build update dictionary
for line in available_updates:
line = line.split()
# ignore irrelevant lines
# FIXME... revisit for something less kludgy
if '*' in line or len(line) != 3 or '.' not in line[0]:
continue

# dep/pkgname - find it
else:
if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos):
basecmd = 'update'
else:
basecmd = 'install'
pkg, version, repo = line
name, dist = pkg.rsplit('.', 1)
updates.update({name: {'version': version, 'dist': dist, 'repo': repo}})
elif rc == 1:
res['msg'] = err
res['rc'] = rc
module.fail_json(**res)

if update_all:
cmd = yum_basecmd + ['update']
else:
for spec in items:
# some guess work involved with groups. update @<group> will install the group if missing
if spec.startswith('@'):
pkgs['update'].append(spec)
continue
# dep/pkgname - find it
else:
if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos):
pkgs['update'].append(spec)
else:
pkgs['install'].append(spec)
pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos)
# FIXME..? may not be desirable to throw an exception here if a single package is missing
if not pkglist:
res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
module.fail_json(**res)

nothing_to_do = True
for this in pkglist:
if basecmd == 'install' and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos):
if spec in pkgs['install'] and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos):
nothing_to_do = False
break

if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos):
nothing_to_do = False
break

if spec in pkgs['update'] and spec in updates.keys():
nothing_to_do = False

if nothing_to_do:
res['results'].append("All packages providing %s are up to date" % spec)
continue
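The new update dictionary is built straight from `yum check-update` output (exit 0: nothing pending, exit 100: updates listed, exit 1: error). Isolated as a sketch that mirrors the loop above:

def parse_check_update(out):
    # `yum check-update` prints "name.arch  version  repo" rows when it
    # exits 100; everything else (blank lines, wrapped rows, obsoleting
    # markers containing '*') is skipped, exactly as in the module code.
    updates = {}
    for line in out.split('\n'):
        line = line.split()
        if '*' in line or len(line) != 3 or '.' not in line[0]:
            continue
        pkg, version, repo = line
        name, dist = pkg.rsplit('.', 1)
        updates[name] = {'version': version, 'dist': dist, 'repo': repo}
    return updates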
@@ -763,27 +798,60 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
module.fail_json(**res)

pkg = spec
if not cmd:
cmd = yum_basecmd + [basecmd, pkg]
# list of package updates
if update_all:
will_update = updates.keys()
else:
will_update = [u for u in pkgs['update'] if u in updates.keys() or u.startswith('@')]

if module.check_mode:
return module.exit_json(changed=True)
# check_mode output
if module.check_mode:
to_update = []
for w in will_update:
if w.startswith('@'):
to_update.append((w, None))
msg = '%s will be updated' % w
else:
to_update.append((w, '%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo'])))

rc, out, err = module.run_command(cmd)
res['changes'] = dict(installed=pkgs['install'], updated=to_update)

res['rc'] += rc
res['results'].append(out)
res['msg'] += err

# FIXME if it is - update it and check to see if it applied
# check to see if there is no longer an update available for the pkgspec

if rc:
res['failed'] = True
else:
if len(will_update) > 0 or len(pkgs['install']) > 0:
res['changed'] = True

return res

# run commands
if cmd:     # update all
rc, out, err = module.run_command(cmd)
res['changed'] = True
else:
if len(pkgs['install']) > 0:    # install missing
cmd = yum_basecmd + ['install'] + pkgs['install']
rc, out, err = module.run_command(cmd)
res['changed'] = True
else:
rc, out, err = [0, '', '']

if len(will_update) > 0:    # update present
cmd = yum_basecmd + ['update'] + pkgs['update']
rc2, out2, err2 = module.run_command(cmd)
res['changed'] = True
else:
rc2, out2, err2 = [0, '', '']

if not update_all:
rc += rc2
out += out2
err += err2

res['rc'] += rc
res['msg'] += err
res['results'].append(out)

if rc:
res['failed'] = True

return res
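In check mode the function now reports what it would change instead of a bare changed=True. The returned structure resembles the following (a hypothetical run for name: [httpd, kernel] where httpd is missing and a kernel update is pending; all values illustrative):

res = {
    'changed': True,
    'rc': 0,
    'msg': '',
    'results': [],
    'changes': {
        'installed': ['httpd'],
        'updated': [('kernel', '3.10.0-229.el7.x86_64 from updates')],
    },
}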

def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo,

@@ -904,10 +972,15 @@ def main():
# loaded and plugins are discovered
my.conf
repoquery = None
if 'rhnplugin' in my.plugins._plugins:
repoquerybin = ensure_yum_utils(module)
if repoquerybin:
repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
try:
yum_plugins = my.plugins._plugins
except AttributeError:
pass
else:
if 'rhnplugin' in yum_plugins:
repoquerybin = ensure_yum_utils(module)
if repoquerybin:
repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']

pkg = [ p.strip() for p in params['name']]
exclude = params['exclude']

@@ -927,4 +1000,3 @@ from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()


@@ -173,7 +173,8 @@ options:
to be installed. The commit MUST be signed and the public key MUST
be trusted in the GPG trustdb.

requirements:
- git (the command line tool)
notes:
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,

@@ -490,10 +491,20 @@ def get_head_branch(git_path, module, dest, remote, bare=False):
f.close()
return branch

def fetch(git_path, module, repo, dest, version, remote, bare, refspec):
def set_remote_url(git_path, module, repo, dest, remote):
''' updates repo from remote sources '''
commands = [("set a new url %s for %s" % (repo, remote), [git_path, 'remote', 'set-url', remote, repo])]

for (label,command) in commands:
(rc,out,err) = module.run_command(command, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))

def fetch(git_path, module, repo, dest, version, remote, bare, refspec):
''' updates repo from remote sources '''
set_remote_url(git_path, module, repo, dest, remote)
commands = []

fetch_str = 'download remote objects and refs'

if bare:

@@ -740,6 +751,7 @@ def main():
if not module.check_mode:
reset(git_path, module, dest)
# exit if already at desired sha version
set_remote_url(git_path, module, repo, dest, remote)
remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
if before == remote_head:
if local_mods:
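set_remote_url() boils down to a single git invocation. A minimal standalone sketch of the same call, assuming the module's run_command semantics:

def set_remote_url(git_path, module, repo, dest, remote):
    # Point `remote` at `repo` before fetching, so that changing the repo
    # argument on an existing checkout actually takes effect.
    (rc, out, err) = module.run_command([git_path, 'remote', 'set-url', remote, repo], cwd=dest)
    if rc != 0:
        module.fail_json(msg="Failed to set a new url %s for %s: %s %s" % (repo, remote, out, err))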


@@ -65,6 +65,13 @@ options:
required: false
default: "no"
choices: [ "yes", "no" ]
update:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.0"
description:
- If C(no), do not retrieve new revisions from the origin repository
executable:
required: false
default: null

@@ -210,6 +217,7 @@ def main():
revision = dict(default=None, aliases=['version']),
force = dict(default='no', type='bool'),
purge = dict(default='no', type='bool'),
update = dict(default='yes', type='bool'),
executable = dict(default=None),
),
)

@@ -218,6 +226,7 @@ def main():
revision = module.params['revision']
force = module.params['force']
purge = module.params['purge']
update = module.params['update']
hg_path = module.params['executable'] or module.get_bin_path('hg', True)
hgrc = os.path.join(dest, '.hg/hgrc')

@@ -234,6 +243,9 @@ def main():
(rc, out, err) = hg.clone()
if rc != 0:
module.fail_json(msg=err)
elif not update:
# Just return having found a repo already in the dest path
before = hg.get_revision()
elif hg.at_revision:
# no update needed, don't pull
before = hg.get_revision()


@@ -78,6 +78,13 @@ options:
version_added: "1.6"
description:
- If C(yes), do export instead of checkout/update.
switch:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.0"
description:
- If C(no), do not call svn switch before update.
'''

EXAMPLES = '''

@@ -103,7 +110,8 @@ class Subversion(object):
self.password = password
self.svn_path = svn_path

def _exec(self, args):
def _exec(self, args, check_rc=True):
'''Execute a subversion command, and return output. If check_rc is False, returns the return code instead of the output.'''
bits = [
self.svn_path,
'--non-interactive',

@@ -115,13 +123,21 @@ class Subversion(object):
if self.password:
bits.extend(["--password", self.password])
bits.extend(args)
rc, out, err = self.module.run_command(bits, check_rc=True)
return out.splitlines()
rc, out, err = self.module.run_command(bits, check_rc)
if check_rc:
return out.splitlines()
else:
return rc

def is_svn_repo(self):
'''Checks if path is a SVN Repo.'''
rc = self._exec(["info", self.dest], check_rc=False)
return rc == 0

def checkout(self):
'''Creates new svn working directory if it does not already exist.'''
self._exec(["checkout", "-r", self.revision, self.repo, self.dest])


def export(self, force=False):
'''Export svn repo to directory'''
cmd = ["export"]
@@ -153,8 +169,9 @@ class Subversion(object):

def has_local_mods(self):
'''True if revisioned files have been added or modified. Unrevisioned files are ignored.'''
lines = self._exec(["status", "--quiet", self.dest])
lines = self._exec(["status", "--quiet", "--ignore-externals", self.dest])
# The --quiet option will return only modified files.

# Has local mods if more than 0 modified revisioned files.
return len(filter(len, lines)) > 0

@@ -183,6 +200,7 @@ def main():
password=dict(required=False),
executable=dict(default=None),
export=dict(default=False, required=False, type='bool'),
switch=dict(default=True, required=False, type='bool'),
),
supports_check_mode=True
)

@@ -195,6 +213,7 @@ def main():
password = module.params['password']
svn_path = module.params['executable'] or module.get_bin_path('svn', True)
export = module.params['export']
switch = module.params['switch']

os.environ['LANG'] = 'C'
svn = Subversion(module, dest, repo, revision, username, password, svn_path)

@@ -208,7 +227,7 @@ def main():
svn.checkout()
else:
svn.export(force=force)
elif os.path.exists("%s/.svn" % (dest, )):
elif svn.is_svn_repo():
# Order matters. Need to get local mods before switch to avoid false
# positives. Need to switch before revert to ensure we are reverting to
# correct repo.

@@ -217,7 +236,8 @@ def main():
module.exit_json(changed=check, before=before, after=after)
before = svn.get_revision()
local_mods = svn.has_local_mods()
svn.switch()
if switch:
svn.switch()
if local_mods:
if force:
svn.revert()


@@ -34,7 +34,6 @@ options:
- The username on the remote host whose authorized_keys file will be modified
required: true
default: null
aliases: []
key:
description:
- The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys)

@@ -72,9 +71,11 @@ options:
version_added: "1.4"
exclusive:
description:
- Whether to remove all other non-specified keys from the
authorized_keys file. Multiple keys can be specified in a single
key= string value by separating them by newlines.
- Whether to remove all other non-specified keys from the authorized_keys file. Multiple keys
can be specified in a single C(key) string value by separating them by newlines.
- This option is not loop aware, so if you use C(with_), it will be exclusive per iteration
of the loop; if you want multiple keys in the file you need to pass them all to C(key) in a
single batch as mentioned above.
required: false
choices: [ "yes", "no" ]
default: "no"

@@ -108,11 +109,13 @@ EXAMPLES = '''
# Using key_options:
- authorized_key: user=charlie
key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
key_options='no-port-forwarding,host="10.0.1.1"'
key_options='no-port-forwarding,from="10.0.1.1"'

# Set up authorized_keys exclusively with one key
- authorized_key: user=root key=public_keys/doe-jane state=present
- authorized_key: user=root key="{{ item }}" state=present
exclusive=yes
with_file:
- public_keys/doe-jane
'''

# Makes sure the public key line is present or absent in the user's .ssh/authorized_keys.

@@ -138,7 +141,7 @@ import shlex
class keydict(dict):

""" a dictionary that maintains the order of keys as they are added """

# http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class

def __init__(self, *args, **kw):

@@ -146,7 +149,7 @@ class keydict(dict):
self.itemlist = super(keydict,self).keys()
def __setitem__(self, key, value):
self.itemlist.append(key)
super(keydict,self).__setitem__(key, value)
super(keydict,self).__setitem__(key, value)
def __iter__(self):
return iter(self.itemlist)
def keys(self):

@@ -154,7 +157,7 @@ class keydict(dict):
def values(self):
return [self[key] for key in self]
def itervalues(self):
return (self[key] for key in self)
return (self[key] for key in self)

def keyfile(module, user, write=False, path=None, manage_dir=True):
"""

@@ -168,9 +171,15 @@ def keyfile(module, user, write=False, path=None, manage_dir=True):
:return: full path string to authorized_keys for user
"""

if module.check_mode and path is not None:
keysfile = path
return keysfile

try:
user_entry = pwd.getpwnam(user)
except KeyError, e:
if module.check_mode and path is None:
module.fail_json(msg="Either user must exist or you must provide full path to key file in check mode")
module.fail_json(msg="Failed to lookup user %s: %s" % (user, str(e)))
if path is None:
homedir = user_entry.pw_dir
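The check-mode branches above encode a simple rule: with an explicit path the module can answer without the target user existing; without one it must resolve the home directory, which requires the account. Condensed into a sketch (the final join is a simplification; the real keyfile() also honors manage_dir and relative paths):

import os
import pwd

def resolve_keysfile(module, user, path=None):
    if module.check_mode and path is not None:
        return path  # no need for the user to exist yet
    try:
        user_entry = pwd.getpwnam(user)
    except KeyError, e:
        if module.check_mode and path is None:
            module.fail_json(msg="Either user must exist or you must provide full path to key file in check mode")
        module.fail_json(msg="Failed to lookup user %s: %s" % (user, str(e)))
    return os.path.join(user_entry.pw_dir, '.ssh', 'authorized_keys')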
@@ -214,8 +223,8 @@ def keyfile(module, user, write=False, path=None, manage_dir=True):
return keysfile

def parseoptions(module, options):
'''
reads a string containing ssh-key options
'''
reads a string containing ssh-key options
and returns a dictionary of those options
'''
options_dict = keydict() #ordered dict

@@ -246,7 +255,7 @@ def parsekey(module, raw_key):
'ssh-ed25519',
'ecdsa-sha2-nistp256',
'ecdsa-sha2-nistp384',
'ecdsa-sha2-nistp521',
'ecdsa-sha2-nistp521',
'ssh-dss',
'ssh-rsa',
]


@@ -4,6 +4,7 @@
# (c) 2012, Dane Summers <dsummers@pinedesk.biz>
# (c) 2013, Mike Grozak <mike.grozak@gmail.com>
# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
# (c) 2015, Evan Kaufman <evan@digitalflophouse.com>
#
# This file is part of Ansible
#

@@ -46,7 +47,7 @@ options:
description:
- Description of a crontab entry.
default: null
required: true
required: false
user:
description:
- The specific user whose crontab should be modified.

@@ -116,10 +117,19 @@ options:
required: false
default: null
choices: [ "reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly" ]
disabled:
description:
- If the job should be disabled (commented out) in the crontab. Only has effect if state=present
version_added: "2.0"
required: false
default: false
requirements:
- cron
author: "Dane Summers (@dsummersl)"
updates: [ 'Mike Grozak', 'Patrick Callahan' ]
author:
- "Dane Summers (@dsummersl)"
- 'Mike Grozak'
- 'Patrick Callahan'
- 'Evan Kaufman (@EvanK)'
"""

EXAMPLES = '''

@@ -290,17 +300,22 @@ class CronTab(object):

return []

def get_cron_job(self,minute,hour,day,month,weekday,job,special):
def get_cron_job(self,minute,hour,day,month,weekday,job,special,disabled):
if disabled:
disable_prefix = '#'
else:
disable_prefix = ''

if special:
if self.cron_file:
return "@%s %s %s" % (special, self.user, job)
return "%s@%s %s %s" % (disable_prefix, special, self.user, job)
else:
return "@%s %s" % (special, job)
return "%s@%s %s" % (disable_prefix, special, job)
else:
if self.cron_file:
return "%s %s %s %s %s %s %s" % (minute,hour,day,month,weekday,self.user,job)
return "%s%s %s %s %s %s %s %s" % (disable_prefix,minute,hour,day,month,weekday,self.user,job)
else:
return "%s %s %s %s %s %s" % (minute,hour,day,month,weekday,job)
return "%s%s %s %s %s %s %s" % (disable_prefix,minute,hour,day,month,weekday,job)

return None
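The disabled flag simply prepends '#' to whichever line format applies; a worked example (a hypothetical condensation of get_cron_job() above for the plain, non cron_file, non special case):

def render_cron_line(minute, hour, day, month, weekday, job, disabled=False):
    prefix = '#' if disabled else ''
    return "%s%s %s %s %s %s %s" % (prefix, minute, hour, day, month, weekday, job)

print render_cron_line('5', '2', '*', '*', '*', '/usr/local/bin/backup.sh', disabled=True)
# -> '#5 2 * * * /usr/local/bin/backup.sh'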
@@ -398,7 +413,7 @@ def main():

module = AnsibleModule(
argument_spec = dict(
name=dict(required=True),
name=dict(required=False),
user=dict(required=False),
job=dict(required=False),
cron_file=dict(required=False),

@@ -413,7 +428,8 @@ def main():
special_time=dict(required=False,
default=None,
choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"],
type='str')
type='str'),
disabled=dict(default=False, type='bool')
),
supports_check_mode = False,
)

@@ -431,6 +447,7 @@ def main():
weekday = module.params['weekday']
reboot = module.params['reboot']
special_time = module.params['special_time']
disabled = module.params['disabled']
do_install = state == 'present'

changed = False

@@ -481,7 +498,7 @@ def main():
changed = crontab.remove_job_file()
module.exit_json(changed=changed,cron_file=cron_file,state=state)

job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time)
job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
old_job = crontab.find_job(name)

if do_install:


@@ -121,7 +121,7 @@ class Group(object):
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)

@@ -233,7 +233,8 @@ class FreeBsdGroup(Group):
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name]
if self.gid is not None:
cmd.append('-g %d' % int(self.gid))
cmd.append('-g')
cmd.append('%d' % int(self.gid))
return self.execute_command(cmd)

def group_mod(self, **kwargs):

@@ -241,7 +242,8 @@ class FreeBsdGroup(Group):
info = self.group_info()
cmd_len = len(cmd)
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g %d' % int(self.gid))
cmd.append('-g')
cmd.append('%d' % int(self.gid))
# modify the group if cmd will do anything
if cmd_len != len(cmd):
if self.module.check_mode:

@@ -271,7 +273,8 @@ class DarwinGroup(Group):
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += [ '-o', 'create' ]
cmd += [ '-i', self.gid ]
if self.gid is not None:
cmd += [ '-i', self.gid ]
cmd += [ '-L', self.name ]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)

@@ -283,12 +286,13 @@ class DarwinGroup(Group):
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)

def group_mod(self):
def group_mod(self, gid=None):
info = self.group_info()
if self.gid is not None and int(self.gid) != info[2]:
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += [ '-o', 'edit' ]
cmd += [ '-i', self.gid ]
if gid is not None:
cmd += [ '-i', gid ]
cmd += [ '-L', self.name ]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
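Splitting '-g %d' into two list elements is not cosmetic: run_command executes argv lists without a shell, so a single '-g 1001' string would reach pw as one unrecognized token instead of an option plus its value. For example:

# One argv element per token, as the diff now does:
gid = 1001
cmd = ['pw', 'groupadd', 'staff']
cmd.append('-g')
cmd.append('%d' % int(gid))
# cmd == ['pw', 'groupadd', 'staff', '-g', '1001']  -- two tokens, parsed correctly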


@@ -21,7 +21,9 @@
DOCUMENTATION = '''
---
module: hostname
author: "Hiroaki Nakamura (@hnakamur)"
author:
- "Hiroaki Nakamura (@hnakamur)"
- "Hideki Saito (@saito-hideki)"
version_added: "1.4"
short_description: Manage hostname
requirements: [ hostname ]

@@ -116,13 +118,13 @@ class GenericStrategy(object):
- set_current_hostname(name)
- set_permanent_hostname(name)
"""

def __init__(self, module):
self.module = module

HOSTNAME_CMD = '/bin/hostname'
self.hostname_cmd = self.module.get_bin_path('hostname', True)

def get_current_hostname(self):
cmd = [self.HOSTNAME_CMD]
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %

@@ -130,7 +132,7 @@ class GenericStrategy(object):
return out.strip()

def set_current_hostname(self, name):
cmd = [self.HOSTNAME_CMD, name]
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %

@@ -363,6 +365,39 @@ class OpenBSDStrategy(GenericStrategy):

# ===========================================

class SolarisStrategy(GenericStrategy):
"""
This is a Solaris11 or later Hostname manipulation strategy class - it
executes the hostname command.
"""

def set_current_hostname(self, name):
cmd_option = '-t'
cmd = [self.hostname_cmd, cmd_option, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))

def get_permanent_hostname(self):
fmri = 'svc:/system/identity:node'
pattern = 'config/nodename'
cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern)
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return out.strip()

def set_permanent_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))

# ===========================================

class FedoraHostname(Hostname):
platform = 'Linux'
distribution = 'Fedora'

@@ -456,6 +491,11 @@ class DebianHostname(Hostname):
distribution = 'Debian'
strategy_class = DebianStrategy

class KaliHostname(Hostname):
platform = 'Linux'
distribution = 'Kali'
strategy_class = DebianStrategy

class UbuntuHostname(Hostname):
platform = 'Linux'
distribution = 'Ubuntu'

@@ -486,6 +526,11 @@ class OpenBSDHostname(Hostname):
distribution = None
strategy_class = OpenBSDStrategy

class SolarisHostname(Hostname):
platform = 'SunOS'
distribution = None
strategy_class = SolarisStrategy

# ===========================================

def main():


@@ -104,7 +104,11 @@ def write_fstab(lines, dest):
fs_w.flush()
fs_w.close()

def set_mount(**kwargs):
def _escape_fstab(v):
""" escape space (040), ampersand (046) and backslash (134) which are invalid in fstab fields """
return v.replace('\\', '\\134').replace(' ', '\\040').replace('&', '\\046')

def set_mount(module, **kwargs):
""" set/change a mount point location in fstab """

# kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab
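Worked examples for _escape_fstab(): fstab represents characters it cannot hold literally as backslash-octal escapes (see man 5 fstab), and the backslash is replaced first so the escapes themselves are not re-escaped:

print _escape_fstab('/mnt/my data')   # -> /mnt/my\040data
print _escape_fstab('fat&furious')    # -> fat\046furious
print _escape_fstab('back\\slash')    # -> back\134slash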
@@ -116,11 +120,17 @@ def set_mount(**kwargs):
)
args.update(kwargs)

# save the mount name before space replacement
origname = args['name']
# replace any space in mount name with '\040' to make it fstab compatible (man fstab)
args['name'] = args['name'].replace(' ', r'\040')

new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n'

to_write = []
exists = False
changed = False
escaped_args = dict([(k, _escape_fstab(v)) for k, v in args.iteritems()])
for line in open(args['fstab'], 'r').readlines():
if not line.strip():
to_write.append(line)

@@ -137,16 +147,16 @@ def set_mount(**kwargs):
ld = {}
ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split()

if ld['name'] != args['name']:
if ld['name'] != escaped_args['name']:
to_write.append(line)
continue

# it exists - now see if what we have is different
exists = True
for t in ('src', 'fstype','opts', 'dump', 'passno'):
if ld[t] != args[t]:
if ld[t] != escaped_args[t]:
changed = True
ld[t] = args[t]
ld[t] = escaped_args[t]

if changed:
to_write.append(new_line % ld)

@@ -157,13 +167,14 @@ def set_mount(**kwargs):
to_write.append(new_line % args)
changed = True

if changed:
if changed and not module.check_mode:
write_fstab(to_write, args['fstab'])

return (args['name'], changed)
# mount function needs origname
return (origname, changed)


def unset_mount(**kwargs):
def unset_mount(module, **kwargs):
""" remove a mount point from fstab """

# kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab

@@ -175,8 +186,14 @@ def unset_mount(**kwargs):
)
args.update(kwargs)

# save the mount name before space replacement
origname = args['name']
# replace any space in mount name with '\040' to make it fstab compatible (man fstab)
args['name'] = args['name'].replace(' ', r'\040')

to_write = []
changed = False
escaped_name = _escape_fstab(args['name'])
for line in open(args['fstab'], 'r').readlines():
if not line.strip():
to_write.append(line)

@@ -193,28 +210,45 @@ def unset_mount(**kwargs):
ld = {}
ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split()

if ld['name'] != args['name']:
if ld['name'] != escaped_name:
to_write.append(line)
continue

# if we got here we found a match - continue and mark changed
changed = True

if changed:
if changed and not module.check_mode:
write_fstab(to_write, args['fstab'])

return (args['name'], changed)
# umount needs origname
return (origname, changed)


def mount(module, **kwargs):
""" mount up a path or remount if needed """

# kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab
args = dict(
opts = 'default',
dump = '0',
passno = '0',
fstab = '/etc/fstab'
)
args.update(kwargs)

mount_bin = module.get_bin_path('mount')

name = kwargs['name']

cmd = [ mount_bin, ]

if os.path.ismount(name):
cmd = [ mount_bin , '-o', 'remount', name ]
else:
cmd = [ mount_bin, name ]
cmd += [ '-o', 'remount', ]

if get_platform().lower() == 'freebsd':
cmd += [ '-F', args['fstab'], ]

cmd += [ name, ]

rc, out, err = module.run_command(cmd)
if rc == 0:

@@ -247,7 +281,8 @@ def main():
src = dict(required=True),
fstype = dict(required=True),
fstab = dict(default='/etc/fstab')
)
),
supports_check_mode=True
)

@@ -262,8 +297,6 @@ def main():
args['passno'] = module.params['passno']
if module.params['opts'] is not None:
args['opts'] = module.params['opts']
if ' ' in args['opts']:
module.fail_json(msg="unexpected space in 'opts' parameter")
if module.params['dump'] is not None:
args['dump'] = module.params['dump']
if module.params['fstab'] is not None:

@@ -284,8 +317,8 @@ def main():
state = module.params['state']
name = module.params['name']
if state == 'absent':
name, changed = unset_mount(**args)
if changed:
name, changed = unset_mount(module, **args)
if changed and not module.check_mode:
if os.path.ismount(name):
res,msg = umount(module, **args)
if res:

@@ -301,26 +334,27 @@ def main():

if state == 'unmounted':
if os.path.ismount(name):
res,msg = umount(module, **args)
if res:
module.fail_json(msg="Error unmounting %s: %s" % (name, msg))
if not module.check_mode:
res,msg = umount(module, **args)
if res:
module.fail_json(msg="Error unmounting %s: %s" % (name, msg))
changed = True

module.exit_json(changed=changed, **args)

if state in ['mounted', 'present']:
if state == 'mounted':
if not os.path.exists(name):
if not os.path.exists(name) and not module.check_mode:
try:
os.makedirs(name)
except (OSError, IOError), e:
module.fail_json(msg="Error making dir %s: %s" % (name, str(e)))

name, changed = set_mount(**args)
name, changed = set_mount(module, **args)
if state == 'mounted':
res = 0
if os.path.ismount(name):
if changed:
if changed and not module.check_mode:
res,msg = mount(module, **args)
elif 'bind' in args.get('opts', []):
changed = True

@@ -335,7 +369,9 @@ def main():
res,msg = mount(module, **args)
else:
changed = True
res,msg = mount(module, **args)
if not module.check_mode:
res,msg = mount(module, **args)


if res:
module.fail_json(msg="Error mounting %s: %s" % (name, msg))
|
||||
|
|
|
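The recurring pattern in the mount hunks above is to work out `changed` first and gate every side effect on `module.check_mode`. A minimal sketch of that pattern, using a stand-in for Ansible's AnsibleModule (the FakeModule class and remount_if_needed helper are illustrative, not code from the module):

import os
import subprocess

class FakeModule(object):
    # Stand-in exposing just the attribute the pattern needs.
    def __init__(self, check_mode=False):
        self.check_mode = check_mode

def remount_if_needed(module, name):
    # Decide what would change before touching anything...
    changed = os.path.ismount(name)
    # ...and only act on the system when check mode is off.
    if changed and not module.check_mode:
        rc = subprocess.call(['mount', '-o', 'remount', name])
        if rc != 0:
            raise RuntimeError("Error remounting %s" % name)
    return changed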
@@ -21,7 +21,7 @@
DOCUMENTATION = '''
---
module: service
author:
author:
    - "Ansible Core Team"
    - "Michael DeHaan"
version_added: "0.1"

@@ -359,7 +359,7 @@ class Service(object):
                self.changed = True

            # Add line to the list.
            new_rc_conf.append(rcline)
            new_rc_conf.append(rcline.strip() + '\n')

        # We are done with reading the current rc.conf, close it.
        RCFILE.close()

@@ -503,15 +503,31 @@ class LinuxService(Service):
                self.svc_initctl = location['initctl']

    def get_systemd_service_enabled(self):
        (rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, self.__systemd_unit,))
        def sysv_exists(name):
            script = '/etc/init.d/' + name
            return os.access(script, os.X_OK)

        def sysv_is_enabled(name):
            return bool(glob.glob('/etc/rc?.d/S??' + name))

        service_name = self.__systemd_unit
        (rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, service_name,))
        if rc == 0:
            return True
        return False
        elif sysv_exists(service_name):
            return sysv_is_enabled(service_name)
        else:
            return False

    def get_systemd_status_dict(self):
        (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit,))

        # Check status first as show will not fail if service does not exist
        (rc, out, err) = self.execute_command("%s show '%s'" % (self.enable_cmd, self.__systemd_unit,))
        if rc != 0:
            self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err))
        elif 'LoadState=not-found' in out:
            self.module.fail_json(msg='systemd could not find the requested service "%r": %s' % (self.__systemd_unit, err))

        key = None
        value_buffer = []
        status_dict = {}

@@ -579,6 +595,11 @@ class LinuxService(Service):
            self.running = "started" in openrc_status_stdout
            self.crashed = "crashed" in openrc_status_stderr

        # Prefer a non-zero return code. For reference, see:
        # http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
        if self.running is None and rc in [1, 2, 3, 4, 69]:
            self.running = False

        # if the job status is still not known check it by status output keywords
        # Only check keywords if there's only one line of output (some init
        # scripts will output verbosely in case of error and those can emit

@@ -603,14 +624,10 @@ class LinuxService(Service):
            elif 'dead but pid file exists' in cleanout:
                self.running = False

        # if the job status is still not known check it by response code
        # For reference, see:
        # http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
        if self.running is None:
            if rc in [1, 2, 3, 4, 69]:
                self.running = False
            elif rc == 0:
                self.running = True
        # if the job status is still not known and we got a zero for the
        # return code, assume here that the service is running
        if self.running is None and rc == 0:
            self.running = True

        # if the job status is still not known check it by special conditions
        if self.running is None:

@@ -885,7 +902,7 @@ class LinuxService(Service):
        if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed:
            self.execute_command("%s zap" % svc_cmd, daemonize=True)

        if self.action is not "restart":
        if self.action != "restart":
            if svc_cmd != '':
                # upstart or systemd or OpenRC
                rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)

@@ -968,7 +985,11 @@ class FreeBsdService(Service):

        rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments))
        cmd = "%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments)
        rcvars = shlex.split(stdout, comments=True)
        try:
            rcvars = shlex.split(stdout, comments=True)
        except:
            #TODO: add a warning to the output with the failure
            pass

        if not rcvars:
            self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)

@@ -988,16 +1009,16 @@ class FreeBsdService(Service):

        try:
            return self.service_enable_rcconf()
        except:
        except Exception:
            self.module.fail_json(msg='unable to set rcvar')

    def service_control(self):

        if self.action is "start":
        if self.action == "start":
            self.action = "onestart"
        if self.action is "stop":
        if self.action == "stop":
            self.action = "onestop"
        if self.action is "reload":
        if self.action == "reload":
            self.action = "onereload"

        return self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments))

@@ -1203,9 +1224,9 @@ class NetBsdService(Service):
            self.running = True

    def service_control(self):
        if self.action is "start":
        if self.action == "start":
            self.action = "onestart"
        if self.action is "stop":
        if self.action == "stop":
            self.action = "onestop"

        self.svc_cmd = "%s" % self.svc_initscript
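The new get_systemd_service_enabled falls back to SysV checks when `systemctl is-enabled` reports failure. The two helpers are short enough to run standalone; the paths are the Debian-style ones the diff itself uses:

import glob
import os

def sysv_exists(name):
    # An executable init script under /etc/init.d marks the service as present.
    return os.access('/etc/init.d/' + name, os.X_OK)

def sysv_is_enabled(name):
    # An S?? start link in any runlevel directory marks it as enabled.
    return bool(glob.glob('/etc/rc?.d/S??' + name))

if __name__ == '__main__':
    for svc in ('ssh', 'cron'):
        print('%s exists=%s enabled=%s' % (svc, sysv_exists(svc), sysv_is_enabled(svc)))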
@@ -123,7 +123,7 @@ def run_setup(module):
        setup_result['ansible_facts'][k] = v

    # hack to keep --verbose from showing all the setup module results
    setup_result['verbose_override'] = True
    setup_result['_ansible_verbose_override'] = True

    return setup_result
57
system/user.py
Normal file → Executable file
@@ -74,6 +74,10 @@ options:
        required: false
        description:
            - Optionally set the user's home directory.
    skeleton:
        required: false
        description:
            - Optionally set a home skeleton directory. Requires createhome option!
    password:
        required: false
        description:

@@ -253,13 +257,13 @@ class User(object):
        self.group = module.params['group']
        self.groups = module.params['groups']
        self.comment = module.params['comment']
        self.home = module.params['home']
        self.shell = module.params['shell']
        self.password = module.params['password']
        self.force = module.params['force']
        self.remove = module.params['remove']
        self.createhome = module.params['createhome']
        self.move_home = module.params['move_home']
        self.skeleton = module.params['skeleton']
        self.system = module.params['system']
        self.login_class = module.params['login_class']
        self.append = module.params['append']

@@ -269,8 +273,12 @@ class User(object):
        self.ssh_comment = module.params['ssh_key_comment']
        self.ssh_passphrase = module.params['ssh_key_passphrase']
        self.update_password = module.params['update_password']
        self.home = None
        self.expires = None

        if module.params['home'] is not None:
            self.home = os.path.expanduser(module.params['home'])

        if module.params['expires']:
            try:
                self.expires = time.gmtime(module.params['expires'])

@@ -360,6 +368,10 @@ class User(object):

        if self.createhome:
            cmd.append('-m')

            if self.skeleton is not None:
                cmd.append('-k')
                cmd.append(self.skeleton)
        else:
            cmd.append('-M')

@@ -565,11 +577,13 @@ class User(object):

    def ssh_key_gen(self):
        info = self.user_info()
        if not os.path.exists(info[5]):
        if not os.path.exists(info[5]) and not self.module.check_mode:
            return (1, '', 'User %s home directory does not exist' % self.name)
        ssh_key_file = self.get_ssh_key_path()
        ssh_dir = os.path.dirname(ssh_key_file)
        if not os.path.exists(ssh_dir):
            if self.module.check_mode:
                return (0, '', '')
            try:
                os.mkdir(ssh_dir, 0700)
                os.chown(ssh_dir, info[2], info[3])

@@ -577,6 +591,8 @@ class User(object):
                return (1, '', 'Failed to create %s: %s' % (ssh_dir, str(e)))
        if os.path.exists(ssh_key_file):
            return (None, 'Key already exists', '')
        if self.module.check_mode:
            return (0, '', '')
        cmd = [self.module.get_bin_path('ssh-keygen', True)]
        cmd.append('-t')
        cmd.append(self.ssh_type)

@@ -635,10 +651,14 @@ class User(object):

    def create_homedir(self, path):
        if not os.path.exists(path):
            # use /etc/skel if possible
            if os.path.exists('/etc/skel'):
            if self.skeleton is not None:
                skeleton = self.skeleton
            else:
                skeleton = '/etc/skel'

            if os.path.exists(skeleton):
                try:
                    shutil.copytree('/etc/skel', path, symlinks=True)
                    shutil.copytree(skeleton, path, symlinks=True)
                except OSError, e:
                    self.module.exit_json(failed=True, msg="%s" % e)
            else:

@@ -726,6 +746,10 @@ class FreeBsdUser(User):
        if self.createhome:
            cmd.append('-m')

        if self.skeleton is not None:
            cmd.append('-k')
            cmd.append(self.skeleton)

        if self.shell is not None:
            cmd.append('-s')
            cmd.append(self.shell)

@@ -913,13 +937,17 @@ class OpenBSDUser(User):
            cmd.append('-L')
            cmd.append(self.login_class)

        if self.password is not None:
        if self.password is not None and self.password != '*':
            cmd.append('-p')
            cmd.append(self.password)

        if self.createhome:
            cmd.append('-m')

        if self.skeleton is not None:
            cmd.append('-k')
            cmd.append(self.skeleton)

        cmd.append(self.name)
        return self.execute_command(cmd)

@@ -1007,7 +1035,8 @@ class OpenBSDUser(User):
            cmd.append('-L')
            cmd.append(self.login_class)

        if self.update_password == 'always' and self.password is not None and info[1] != self.password:
        if self.update_password == 'always' and self.password is not None \
            and self.password != '*' and info[1] != self.password:
            cmd.append('-p')
            cmd.append(self.password)

@@ -1087,6 +1116,10 @@ class NetBSDUser(User):
        if self.createhome:
            cmd.append('-m')

        if self.skeleton is not None:
            cmd.append('-k')
            cmd.append(self.skeleton)

        cmd.append(self.name)
        return self.execute_command(cmd)

@@ -1239,6 +1272,10 @@ class SunOS(User):
        if self.createhome:
            cmd.append('-m')

        if self.skeleton is not None:
            cmd.append('-k')
            cmd.append(self.skeleton)

        cmd.append(self.name)

        if self.module.check_mode:

@@ -1747,6 +1784,10 @@ class AIX(User):
        if self.createhome:
            cmd.append('-m')

        if self.skeleton is not None:
            cmd.append('-k')
            cmd.append(self.skeleton)

        cmd.append(self.name)
        (rc, out, err) = self.execute_command(cmd)

@@ -2018,6 +2059,7 @@ def main():
        remove=dict(default='no', type='bool'),
        # following options are specific to useradd
        createhome=dict(default='yes', type='bool'),
        skeleton=dict(default=None, type='str'),
        system=dict(default='no', type='bool'),
        # following options are specific to usermod
        move_home=dict(default='no', type='bool'),

@@ -2110,6 +2152,7 @@ def main():

    # deal with ssh key
    if user.sshkeygen:
        # generate ssh key (note: this function is check mode aware)
        (rc, out, err) = user.ssh_key_gen()
        if rc is not None and rc != 0:
            module.fail_json(name=user.name, msg=err, rc=rc)
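The skeleton changes above all follow one shape: pass `-k <dir>` to the platform's useradd-style tool when creating the home directory, and in create_homedir prefer the supplied skeleton over /etc/skel. A self-contained sketch of that fallback (the function signature is simplified from the class method):

import os
import shutil

def create_homedir(path, skeleton=None):
    if os.path.exists(path):
        return
    # Prefer the caller-supplied skeleton, fall back to /etc/skel.
    src = skeleton if skeleton is not None else '/etc/skel'
    if os.path.exists(src):
        shutil.copytree(src, path, symlinks=True)
    else:
        os.makedirs(path)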
21
test-docs.sh
Executable file
@@ -0,0 +1,21 @@
#!/bin/sh
set -x

CHECKOUT_DIR=".ansible-checkout"
MOD_REPO="$1"

# Hidden file to avoid the module_formatter recursing into the checkout
git clone https://github.com/ansible/ansible "$CHECKOUT_DIR"
cd "$CHECKOUT_DIR"
git submodule update --init
rm -rf "lib/ansible/modules/$MOD_REPO"
ln -s "$TRAVIS_BUILD_DIR/" "lib/ansible/modules/$MOD_REPO"

pip install -U Jinja2 PyYAML setuptools six pycrypto sphinx

. ./hacking/env-setup
PAGER=/bin/cat bin/ansible-doc -l
if [ $? -ne 0 ] ; then
    exit $?
fi
make -C docsite
@@ -1,5 +1,20 @@
# -*- mode: python -*-

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: pause
@@ -301,6 +301,25 @@ def _little_endian_convert_32bit(block):
    # which lets us start at the end of the string block and work to the begining
    return "".join([ block[x:x+2] for x in xrange(6, -2, -2) ])

def _create_connection( (host, port), connect_timeout):
    """
    Connect to a 2-tuple (host, port) and return
    the socket object.

    Args:
        2-tuple (host, port) and connection timeout
    Returns:
        Socket object
    """
    if sys.version_info < (2, 6):
        (family, _) = _convert_host_to_ip(host)
        connect_socket = socket.socket(family, socket.SOCK_STREAM)
        connect_socket.settimeout(connect_timeout)
        connect_socket.connect( (host, port) )
    else:
        connect_socket = socket.create_connection( (host, port), connect_timeout)
    return connect_socket

def main():

    module = AnsibleModule(

@@ -362,10 +381,8 @@ def main():
            except IOError:
                break
        elif port:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(connect_timeout)
            try:
                s.connect( (host, port) )
                s = _create_connection( (host, port), connect_timeout)
                s.shutdown(socket.SHUT_RDWR)
                s.close()
                time.sleep(1)

@@ -410,10 +427,8 @@ def main():
            elapsed = datetime.datetime.now() - start
            module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
        elif port:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(connect_timeout)
            try:
                s.connect( (host, port) )
                s = _create_connection( (host, port), connect_timeout)
                if search_regex:
                    data = ''
                    matched = False
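_create_connection exists because socket.create_connection only appeared in Python 2.6; on older interpreters the module resolves the address family itself via _convert_host_to_ip. A runnable approximation (the family guess below is a simplification of that helper, not the module's code):

import socket
import sys

def create_connection(addr, connect_timeout):
    host, port = addr
    if sys.version_info >= (2, 6):
        return socket.create_connection((host, port), connect_timeout)
    # Pre-2.6 fallback: pick the family by hand and connect manually.
    family = socket.AF_INET6 if ':' in host else socket.AF_INET
    s = socket.socket(family, socket.SOCK_STREAM)
    s.settimeout(connect_timeout)
    s.connect((host, port))
    return s

# Usage mirroring the wait_for loops:
# s = create_connection(('127.0.0.1', 22), 5)
# s.shutdown(socket.SHUT_RDWR)
# s.close()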
@@ -35,6 +35,7 @@ options:
    choices: ['present', 'absent']
    default: present

requirements: ["a2enmod","a2dismod"]
'''

EXAMPLES = '''
@@ -30,7 +30,8 @@ options:
  command:
    choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ]
    description:
      - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate. Other commands can be entered, but will fail if they're unknown to Django.
      - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate.
      - Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run with the I(--noinput) flag.
    required: true
  app_path:
    description:

@@ -89,7 +90,7 @@ notes:
   - I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified.
   - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location.
   - This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately.
   - To be able to use the migrate command, you must have south installed and added as an app in your settings
   - To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings
   - To be able to use the collectstatic command, you must have enabled staticfiles in your settings
requirements: [ "virtualenv", "django" ]
author: "Scott Anderson (@tastychutney)"

@@ -102,7 +103,7 @@ EXAMPLES = """
# Load the initial_data fixture into the application
- django_manage: command=loaddata app_path={{ django_dir }} fixtures={{ initial_data }}

#Run syncdb on the application
# Run syncdb on the application
- django_manage: >
      command=syncdb
      app_path={{ django_dir }}

@@ -110,8 +111,11 @@ EXAMPLES = """
      pythonpath={{ settings_dir }}
      virtualenv={{ virtualenv_dir }}

#Run the SmokeTest test case from the main app. Useful for testing deploys.
- django_manage: command=test app_path=django_dir apps=main.SmokeTest
# Run the SmokeTest test case from the main app. Useful for testing deploys.
- django_manage: command=test app_path={{ django_dir }} apps=main.SmokeTest

# Create an initial superuser.
- django_manage: command="createsuperuser --noinput --username=admin --email=admin@example.com" app_path={{ django_dir }}
"""

@@ -159,7 +163,10 @@ def syncdb_filter_output(line):
    return ("Creating table " in line) or ("Installed" in line and "Installed 0 object" not in line)

def migrate_filter_output(line):
    return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line)
    return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) or ("Applying" in line)

def collectstatic_filter_output(line):
    return "0 static files" not in line

def main():
    command_allowed_param_map = dict(

@@ -234,7 +241,7 @@ def main():

    _ensure_virtualenv(module)

    cmd = "python manage.py %s" % (command, )
    cmd = "./manage.py %s" % (command, )

    if command in noinput_commands:
        cmd = '%s --noinput' % cmd
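Switching from `python manage.py` to `./manage.py` presumably lets the script's own shebang pick the interpreter, which matters when a virtualenv is in play. A sketch of the command construction (the noinput_commands default here is illustrative, not the module's full list):

def build_cmd(command, noinput_commands=('createsuperuser', 'migrate', 'syncdb')):
    # Run the script directly so its shebang chooses the interpreter.
    cmd = "./manage.py %s" % (command,)
    if command in noinput_commands:
        cmd = '%s --noinput' % cmd
    return cmd

assert build_cmd('syncdb') == './manage.py syncdb --noinput'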
@@ -46,7 +46,10 @@ options:
    choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
    default: "apr_md5_crypt"
    description:
      - Encryption scheme to be used.
      - Encryption scheme to be used. As well as the four choices listed
        here, you can also use any other hash supported by passlib, such as
        md5_crypt and sha256_crypt, which are linux passwd hashes. If you
        do so the password file will not be compatible with Apache or Nginx
  state:
    required: false
    choices: [ present, absent ]

@@ -74,6 +77,8 @@ EXAMPLES = """
- htpasswd: path=/etc/nginx/passwdfile name=janedoe password=9s36?;fyNp owner=root group=www-data mode=0640
# Remove a user from a password file
- htpasswd: path=/etc/apache2/passwdfile name=foobar state=absent
# Add a user to a password file suitable for use by libpam-pwdfile
- htpasswd: path=/etc/mail/passwords name=alex password=oedu2eGh crypt_scheme=md5_crypt
"""

@@ -82,13 +87,15 @@ import tempfile
from distutils.version import StrictVersion

try:
    from passlib.apache import HtpasswdFile
    from passlib.apache import HtpasswdFile, htpasswd_context
    from passlib.context import CryptContext
    import passlib
except ImportError:
    passlib_installed = False
else:
    passlib_installed = True

apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]

def create_missing_directories(dest):
    destpath = os.path.dirname(dest)

@@ -100,6 +107,10 @@ def present(dest, username, password, crypt_scheme, create, check_mode):
    """ Ensures user is present

    Returns (msg, changed) """
    if crypt_scheme in apache_hashes:
        context = htpasswd_context
    else:
        context = CryptContext(schemes = [ crypt_scheme ] + apache_hashes)
    if not os.path.exists(dest):
        if not create:
            raise ValueError('Destination %s does not exist' % dest)

@@ -107,9 +118,9 @@ def present(dest, username, password, crypt_scheme, create, check_mode):
            return ("Create %s" % dest, True)
        create_missing_directories(dest)
        if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
            ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme)
            ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
        else:
            ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme)
            ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
        if getattr(ht, 'set_password', None):
            ht.set_password(username, password)
        else:

@@ -118,9 +129,9 @@ def present(dest, username, password, crypt_scheme, create, check_mode):
        return ("Created %s and added %s" % (dest, username), True)
    else:
        if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
            ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme)
            ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
        else:
            ht = HtpasswdFile(dest, default=crypt_scheme)
            ht = HtpasswdFile(dest, default=crypt_scheme, context=context)

        found = None
        if getattr(ht, 'check_password', None):

@@ -179,7 +190,7 @@ def main():
        path=dict(required=True, aliases=["dest", "destfile"]),
        name=dict(required=True, aliases=["username"]),
        password=dict(required=False, default=None),
        crypt_scheme=dict(required=False, default=None),
        crypt_scheme=dict(required=False, default="apr_md5_crypt"),
        state=dict(required=False, default="present"),
        create=dict(type='bool', default='yes'),
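The context plumbing is what lets non-Apache schemes work: when crypt_scheme falls outside the four Apache-native hashes, a passlib CryptContext that knows the requested scheme is handed to HtpasswdFile. A minimal standalone sketch (requires passlib 1.6+; the file name is an example):

from passlib.apache import HtpasswdFile, htpasswd_context
from passlib.context import CryptContext

apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]

def make_context(crypt_scheme):
    # Apache-native schemes use passlib's stock htpasswd context.
    if crypt_scheme in apache_hashes:
        return htpasswd_context
    return CryptContext(schemes=[crypt_scheme] + apache_hashes)

ht = HtpasswdFile("passwords", new=True, default_scheme="md5_crypt",
                  context=make_context("md5_crypt"))
ht.set_password("alex", "oedu2eGh")
ht.save()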
@@ -30,7 +30,7 @@ version_added: "0.7"
options:
  name:
    description:
      - The name of the supervisord program or group to manage.
      - The name of the supervisord program or group to manage.
      - The name will be taken as group name when it ends with a colon I(:)
      - Group support is only available in Ansible version 1.6 or later.
    required: true

@@ -64,7 +64,7 @@ options:
      - The desired state of program/group.
    required: true
    default: null
    choices: [ "present", "started", "stopped", "restarted" ]
    choices: [ "present", "started", "stopped", "restarted", "absent" ]
  supervisorctl_path:
    description:
      - path to supervisorctl executable

@@ -75,8 +75,8 @@ notes:
  - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
  - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
requirements: [ "supervisorctl" ]
author:
  - "Matt Wright (@mattupstate)"
author:
  - "Matt Wright (@mattupstate)"
  - "Aaron Wang (@inetfuture) <inetfuture@gmail.com>"
'''

@@ -103,7 +103,7 @@ def main():
        username=dict(required=False),
        password=dict(required=False),
        supervisorctl_path=dict(required=False),
        state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped'])
        state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent'])
    )

    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

@@ -194,10 +194,26 @@ def main():
    if state == 'restarted':
        rc, out, err = run_supervisorctl('update', check_rc=True)
        processes = get_matched_processes()
        if len(processes) == 0:
            module.fail_json(name=name, msg="ERROR (no such process)")

        take_action_on_processes(processes, lambda s: True, 'restart', 'started')

    processes = get_matched_processes()

    if state == 'absent':
        if len(processes) == 0:
            module.exit_json(changed=False, name=name, state=state)

        if module.check_mode:
            module.exit_json(changed=True)
        run_supervisorctl('reread', check_rc=True)
        rc, out, err = run_supervisorctl('remove', name)
        if '%s: removed process group' % name in out:
            module.exit_json(changed=True, name=name, state=state)
        else:
            module.fail_json(msg=out, name=name, state=state)

    if state == 'present':
        if len(processes) > 0:
            module.exit_json(changed=False, name=name, state=state)

@@ -212,9 +228,13 @@ def main():
        module.fail_json(msg=out, name=name, state=state)

    if state == 'started':
        if len(processes) == 0:
            module.fail_json(name=name, msg="ERROR (no such process)")
        take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started')

    if state == 'stopped':
        if len(processes) == 0:
            module.fail_json(name=name, msg="ERROR (no such process)")
        take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped')

# import module snippets
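The new absent branch boils down to `reread` followed by `remove`, with success detected from supervisorctl's own output. A rough standalone equivalent without the Ansible plumbing (remove_program is a hypothetical helper; the success string is the one the module checks):

import subprocess

def remove_program(name, supervisorctl='supervisorctl'):
    subprocess.check_call([supervisorctl, 'reread'])
    out = subprocess.check_output([supervisorctl, 'remove', name])
    if isinstance(out, bytes):
        out = out.decode('utf-8')
    if '%s: removed process group' % name in out:
        return True  # changed
    raise RuntimeError(out)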
@@ -25,7 +25,7 @@ $result = New-Object psobject @{
    changed = $false
};

$win32_os = Get-WmiObject Win32_OperatingSystem
$win32_os = Get-CimInstance Win32_OperatingSystem
$osversion = [Environment]::OSVersion
$memory = @()
$memory += Get-WmiObject win32_Physicalmemory

@@ -60,12 +60,15 @@ Set-Attr $result.ansible_facts "ansible_hostname" $env:COMPUTERNAME;
Set-Attr $result.ansible_facts "ansible_fqdn" "$([System.Net.Dns]::GetHostByName((hostname)).HostName)"
Set-Attr $result.ansible_facts "ansible_system" $osversion.Platform.ToString()
Set-Attr $result.ansible_facts "ansible_os_family" "Windows"
Set-Attr $result.ansible_facts "ansible_os_name" $win32_os.Name.Split('|')[0]
Set-Attr $result.ansible_facts "ansible_os_name" ($win32_os.Name.Split('|')[0]).Trim()
Set-Attr $result.ansible_facts "ansible_distribution" $osversion.VersionString
Set-Attr $result.ansible_facts "ansible_distribution_version" $osversion.Version.ToString()

Set-Attr $result.ansible_facts "ansible_totalmem" $capacity

Set-Attr $result.ansible_facts "ansible_lastboot" $win32_os.lastbootuptime.ToString("u")
Set-Attr $result.ansible_facts "ansible_uptime_seconds" $([System.Convert]::ToInt64($(Get-Date).Subtract($win32_os.lastbootuptime).TotalSeconds))

$ips = @()
Foreach ($ip in $netcfg.IPAddress) { If ($ip) { $ips += $ip } }
Set-Attr $result.ansible_facts "ansible_ip_addresses" $ips
@@ -17,7 +17,7 @@
# WANT_JSON
# POWERSHELL_COMMON

$params = Parse-Args $args;
$params = Parse-Args $args $true;

$src = Get-Attr $params "src" (Get-Attr $params "path" $FALSE);
If (-not $src)
@@ -18,8 +18,6 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import os
import time

DOCUMENTATION = '''
---
@@ -27,48 +27,18 @@ $result = New-Object PSObject -Property @{
    changed = $false
}

If ($params.name) {
    $name = $params.name
}
Else {
    Fail-Json $result "mising required argument: name"
$name = Get-Attr $params "name" -failifempty $true
$name = $name -split ',' | % { $_.Trim() }

$state = Get-Attr $params "state" "present"
$state = $state.ToString().ToLower()
If (($state -ne 'present') -and ($state -ne 'absent')) {
    Fail-Json $result "state is '$state'; must be 'present' or 'absent'"
}

If ($params.state) {
    $state = $params.state.ToString().ToLower()
    If (($state -ne 'present') -and ($state -ne 'absent')) {
        Fail-Json $result "state is '$state'; must be 'present' or 'absent'"
    }
}
Elseif (!$params.state) {
    $state = "present"
}

If ($params.restart) {
    $restart = $params.restart | ConvertTo-Bool
}
Else
{
    $restart = $false
}

if ($params.include_sub_features)
{
    $includesubfeatures = $params.include_sub_features | ConvertTo-Bool
}
Else
{
    $includesubfeatures = $false
}

if ($params.include_management_tools)
{
    $includemanagementtools = $params.include_management_tools | ConvertTo-Bool
}
Else
{
    $includemanagementtools = $false
}
$restart = Get-Attr $params "restart" $false | ConvertTo-Bool
$includesubfeatures = Get-Attr $params "include_sub_features" $false | ConvertTo-Bool
$includemanagementtools = Get-Attr $params "include_management_tools" $false | ConvertTo-Bool

If ($state -eq "present") {
    try {
Some files were not shown because too many files have changed in this diff