Merge branch 'devel' of https://github.com/ansible/ansible-modules-core into ansible-devel

This commit is contained in:
Phillip Holmes 2015-01-22 15:04:55 -06:00
commit c5cb352262
143 changed files with 2523 additions and 1213 deletions

View file

@ -50,7 +50,7 @@ options:
state: state:
description: description:
- If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated. - If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated.
If state is absent, stack will be removed. If state is "absent", stack will be removed.
required: true required: true
default: null default: null
aliases: [] aliases: []
@ -60,6 +60,13 @@ options:
required: true required: true
default: null default: null
aliases: [] aliases: []
stack_policy:
description:
- the path of the cloudformation stack policy
required: false
default: null
aliases: []
version_added: "x.x"
tags: tags:
description: description:
- Dictionary of tags to associate with stack and it's resources during stack creation. Cannot be updated later. - Dictionary of tags to associate with stack and it's resources during stack creation. Cannot be updated later.
@ -97,18 +104,19 @@ EXAMPLES = '''
# Basic task example # Basic task example
tasks: tasks:
- name: launch ansible cloudformation example - name: launch ansible cloudformation example
action: cloudformation > cloudformation:
stack_name="ansible-cloudformation" state=present stack_name: "ansible-cloudformation"
region=us-east-1 disable_rollback=true state: "present"
template=files/cloudformation-example.json region: "us-east-1"
args: disable_rollback: true
template: "files/cloudformation-example.json"
template_parameters: template_parameters:
KeyName: jmartin KeyName: "jmartin"
DiskType: ephemeral DiskType: "ephemeral"
InstanceType: m1.small InstanceType: "m1.small"
ClusterSize: 3 ClusterSize: 3
tags: tags:
Stack: ansible-cloudformation Stack: "ansible-cloudformation"
''' '''
import json import json
@ -122,13 +130,6 @@ except ImportError:
sys.exit(1) sys.exit(1)
class Region:
def __init__(self, region):
'''connects boto to the region specified in the cloudformation template'''
self.name = region
self.endpoint = 'cloudformation.%s.amazonaws.com' % region
def boto_exception(err): def boto_exception(err):
'''generic error message handler''' '''generic error message handler'''
if hasattr(err, 'error_message'): if hasattr(err, 'error_message'):
@ -196,6 +197,7 @@ def main():
template_parameters=dict(required=False, type='dict', default={}), template_parameters=dict(required=False, type='dict', default={}),
state=dict(default='present', choices=['present', 'absent']), state=dict(default='present', choices=['present', 'absent']),
template=dict(default=None, required=True), template=dict(default=None, required=True),
stack_policy=dict(default=None, required=False),
disable_rollback=dict(default=False, type='bool'), disable_rollback=dict(default=False, type='bool'),
tags=dict(default=None) tags=dict(default=None)
) )
@ -208,6 +210,10 @@ def main():
state = module.params['state'] state = module.params['state']
stack_name = module.params['stack_name'] stack_name = module.params['stack_name']
template_body = open(module.params['template'], 'r').read() template_body = open(module.params['template'], 'r').read()
if module.params['stack_policy'] is not None:
stack_policy_body = open(module.params['stack_policy'], 'r').read()
else:
stack_policy_body = None
disable_rollback = module.params['disable_rollback'] disable_rollback = module.params['disable_rollback']
template_parameters = module.params['template_parameters'] template_parameters = module.params['template_parameters']
tags = module.params['tags'] tags = module.params['tags']
@ -226,11 +232,10 @@ def main():
stack_outputs = {} stack_outputs = {}
try: try:
cf_region = Region(region) cfn = boto.cloudformation.connect_to_region(
cfn = boto.cloudformation.connection.CloudFormationConnection( region,
aws_access_key_id=aws_access_key, aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key, aws_secret_access_key=aws_secret_key,
region=cf_region,
) )
except boto.exception.NoAuthHandlerFound, e: except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e)) module.fail_json(msg=str(e))
@ -244,6 +249,7 @@ def main():
try: try:
cfn.create_stack(stack_name, parameters=template_parameters_tup, cfn.create_stack(stack_name, parameters=template_parameters_tup,
template_body=template_body, template_body=template_body,
stack_policy_body=stack_policy_body,
disable_rollback=disable_rollback, disable_rollback=disable_rollback,
capabilities=['CAPABILITY_IAM'], capabilities=['CAPABILITY_IAM'],
**kwargs) **kwargs)
@ -264,6 +270,7 @@ def main():
try: try:
cfn.update_stack(stack_name, parameters=template_parameters_tup, cfn.update_stack(stack_name, parameters=template_parameters_tup,
template_body=template_body, template_body=template_body,
stack_policy_body=stack_policy_body,
disable_rollback=disable_rollback, disable_rollback=disable_rollback,
capabilities=['CAPABILITY_IAM']) capabilities=['CAPABILITY_IAM'])
operation = 'UPDATE' operation = 'UPDATE'

View file

@ -17,9 +17,9 @@
DOCUMENTATION = ''' DOCUMENTATION = '''
--- ---
module: ec2 module: ec2
short_description: create, terminate, start or stop an instance in ec2, return instanceid short_description: create, terminate, start or stop an instance in ec2
description: description:
- Creates or terminates ec2 instances. When created optionally waits for it to be 'running'. This module has a dependency on python-boto >= 2.5 - Creates or terminates ec2 instances.
version_added: "0.9" version_added: "0.9"
options: options:
key_name: key_name:
@ -28,12 +28,6 @@ options:
required: false required: false
default: null default: null
aliases: ['keypair'] aliases: ['keypair']
id:
description:
- identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
required: false
default: null
aliases: []
group: group:
description: description:
- security group (or list of groups) to use with the instance - security group (or list of groups) to use with the instance
@ -67,6 +61,13 @@ options:
required: true required: true
default: null default: null
aliases: [] aliases: []
tenancy:
version_added: "1.9"
description:
- An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are "default" or "dedicated". Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
required: false
default: default
aliases: []
spot_price: spot_price:
version_added: "1.5" version_added: "1.5"
description: description:
@ -76,7 +77,7 @@ options:
aliases: [] aliases: []
image: image:
description: description:
- I(emi) (or I(ami)) to use for the instance - I(ami) ID to use for the instance
required: true required: true
default: null default: null
aliases: [] aliases: []
@ -94,7 +95,7 @@ options:
aliases: [] aliases: []
wait: wait:
description: description:
- wait for the instance to be in state 'running' before returning - wait for the instance to be 'running' before returning. Does not wait for SSH, see 'wait_for' example for details.
required: false required: false
default: "no" default: "no"
choices: [ "yes", "no" ] choices: [ "yes", "no" ]
@ -226,40 +227,40 @@ extends_documentation_fragment: aws
''' '''
EXAMPLES = ''' EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region. # Note: These examples do not set authentication details, see the AWS Guide for details.
# It is assumed that their matching environment variables are set.
# Basic provisioning example # Basic provisioning example
- local_action: - ec2:
module: ec2
key_name: mykey key_name: mykey
instance_type: c1.medium instance_type: t2.micro
image: emi-40603AD1 image: ami-123456
wait: yes wait: yes
group: webserver group: webserver
count: 3 count: 3
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Advanced example with tagging and CloudWatch # Advanced example with tagging and CloudWatch
- local_action: - ec2:
module: ec2
key_name: mykey key_name: mykey
group: databases group: databases
instance_type: m1.large instance_type: t2.micro
image: ami-6e649707 image: ami-123456
wait: yes wait: yes
wait_timeout: 500 wait_timeout: 500
count: 5 count: 5
instance_tags: instance_tags:
db: postgres db: postgres
monitoring: yes monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Single instance with additional IOPS volume from snapshot and volume delete on termination # Single instance with additional IOPS volume from snapshot and volume delete on termination
local_action: - ec2:
module: ec2
key_name: mykey key_name: mykey
group: webserver group: webserver
instance_type: m1.large instance_type: c3.medium
image: ami-6e649707 image: ami-123456
wait: yes wait: yes
wait_timeout: 500 wait_timeout: 500
volumes: volumes:
@ -270,10 +271,11 @@ local_action:
volume_size: 100 volume_size: 100
delete_on_termination: true delete_on_termination: true
monitoring: yes monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Multiple groups example # Multiple groups example
local_action: - ec2:
module: ec2
key_name: mykey key_name: mykey
group: ['databases', 'internal-services', 'sshable', 'and-so-forth'] group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
instance_type: m1.large instance_type: m1.large
@ -284,10 +286,11 @@ local_action:
instance_tags: instance_tags:
db: postgres db: postgres
monitoring: yes monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Multiple instances with additional volume from snapshot # Multiple instances with additional volume from snapshot
local_action: - ec2:
module: ec2
key_name: mykey key_name: mykey
group: webserver group: webserver
instance_type: m1.large instance_type: m1.large
@ -300,21 +303,23 @@ local_action:
snapshot: snap-abcdef12 snapshot: snap-abcdef12
volume_size: 10 volume_size: 10
monitoring: yes monitoring: yes
# VPC example
- local_action:
module: ec2
key_name: mykey
group_id: sg-1dc53f72
instance_type: m1.small
image: ami-6e649707
wait: yes
vpc_subnet_id: subnet-29e63245 vpc_subnet_id: subnet-29e63245
assign_public_ip: yes assign_public_ip: yes
# Spot instance example # Dedicated tenancy example
- local_action: - local_action:
module: ec2 module: ec2
assign_public_ip: yes
group_id: sg-1dc53f72
key_name: mykey
image: ami-6e649707
instance_type: m1.small
tenancy: dedicated
vpc_subnet_id: subnet-29e63245
wait: yes
# Spot instance example
- ec2:
spot_price: 0.24 spot_price: 0.24
spot_wait_timeout: 600 spot_wait_timeout: 600
keypair: mykey keypair: mykey
@ -328,7 +333,6 @@ local_action:
# Launch instances, runs some tasks # Launch instances, runs some tasks
# and then terminate them # and then terminate them
- name: Create a sandbox instance - name: Create a sandbox instance
hosts: localhost hosts: localhost
gather_facts: False gather_facts: False
@ -340,13 +344,21 @@ local_action:
region: us-east-1 region: us-east-1
tasks: tasks:
- name: Launch instance - name: Launch instance
local_action: ec2 key_name={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image }} wait=true region={{ region }} ec2:
key_name: "{{ keypair }}"
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
image: "{{ image }}"
wait: true
region: "{{ region }}"
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
register: ec2 register: ec2
- name: Add new instance to host group - name: Add new instance to host group
local_action: add_host hostname={{ item.public_ip }} groupname=launched add_host: hostname={{ item.public_ip }} groupname=launched
with_items: ec2.instances with_items: ec2.instances
- name: Wait for SSH to come up - name: Wait for SSH to come up
local_action: wait_for host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started wait_for: host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started
with_items: ec2.instances with_items: ec2.instances
- name: Configure instance(s) - name: Configure instance(s)
@ -362,8 +374,7 @@ local_action:
connection: local connection: local
tasks: tasks:
- name: Terminate instances that were previously launched - name: Terminate instances that were previously launched
local_action: ec2:
module: ec2
state: 'absent' state: 'absent'
instance_ids: '{{ ec2.instance_ids }}' instance_ids: '{{ ec2.instance_ids }}'
@ -382,12 +393,13 @@ local_action:
region: us-east-1 region: us-east-1
tasks: tasks:
- name: Start the sandbox instances - name: Start the sandbox instances
local_action: ec2:
module: ec2
instance_ids: '{{ instance_ids }}' instance_ids: '{{ instance_ids }}'
region: '{{ region }}' region: '{{ region }}'
state: running state: running
wait: True wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
role: role:
- do_neat_stuff - do_neat_stuff
- do_more_neat_stuff - do_more_neat_stuff
@ -403,39 +415,41 @@ local_action:
- 'i-xxxxxx' - 'i-xxxxxx'
region: us-east-1 region: us-east-1
tasks: tasks:
- name: Stop the sanbox instances - name: Stop the sandbox instances
local_action: ec2:
module: ec2
instance_ids: '{{ instance_ids }}' instance_ids: '{{ instance_ids }}'
region: '{{ region }}' region: '{{ region }}'
state: stopped state: stopped
wait: True wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# #
# Enforce that 5 instances with a tag "foo" are running # Enforce that 5 instances with a tag "foo" are running
# (Highly recommended!)
# #
- local_action: - ec2:
module: ec2
key_name: mykey key_name: mykey
instance_type: c1.medium instance_type: c1.medium
image: emi-40603AD1 image: ami-40603AD1
wait: yes wait: yes
group: webserver group: webserver
instance_tags: instance_tags:
foo: bar foo: bar
exact_count: 5 exact_count: 5
count_tag: foo count_tag: foo
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# #
# Enforce that 5 running instances named "database" with a "dbtype" of "postgres" # Enforce that 5 running instances named "database" with a "dbtype" of "postgres"
# #
- local_action: - ec2:
module: ec2
key_name: mykey key_name: mykey
instance_type: c1.medium instance_type: c1.medium
image: emi-40603AD1 image: ami-40603AD1
wait: yes wait: yes
group: webserver group: webserver
instance_tags: instance_tags:
@ -445,6 +459,8 @@ local_action:
count_tag: count_tag:
Name: database Name: database
dbtype: postgres dbtype: postgres
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# #
# count_tag complex argument examples # count_tag complex argument examples
@ -501,7 +517,7 @@ def _set_none_to_blank(dictionary):
result = dictionary result = dictionary
for k in result.iterkeys(): for k in result.iterkeys():
if type(result[k]) == dict: if type(result[k]) == dict:
result[k] = _set_non_to_blank(result[k]) result[k] = _set_none_to_blank(result[k])
elif not result[k]: elif not result[k]:
result[k] = "" result[k] = ""
return result return result
@ -585,6 +601,11 @@ def get_instance_info(inst):
except AttributeError: except AttributeError:
instance_info['ebs_optimized'] = False instance_info['ebs_optimized'] = False
try:
instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
except AttributeError:
instance_info['tenancy'] = 'default'
return instance_info return instance_info
def boto_supports_associate_public_ip_address(ec2): def boto_supports_associate_public_ip_address(ec2):
@ -660,6 +681,11 @@ def enforce_count(module, ec2):
count_tag = module.params.get('count_tag') count_tag = module.params.get('count_tag')
zone = module.params.get('zone') zone = module.params.get('zone')
# fail here if the exact count was specified without filtering
# on a tag, as this may lead to a undesired removal of instances
if exact_count and count_tag is None:
module.fail_json(msg="you must use the 'count_tag' option with exact_count")
reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag, zone) reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag, zone)
changed = None changed = None
@ -723,6 +749,7 @@ def create_instances(module, ec2, override_count=None):
group_id = module.params.get('group_id') group_id = module.params.get('group_id')
zone = module.params.get('zone') zone = module.params.get('zone')
instance_type = module.params.get('instance_type') instance_type = module.params.get('instance_type')
tenancy = module.params.get('tenancy')
spot_price = module.params.get('spot_price') spot_price = module.params.get('spot_price')
image = module.params.get('image') image = module.params.get('image')
if override_count: if override_count:
@ -807,6 +834,9 @@ def create_instances(module, ec2, override_count=None):
if ebs_optimized: if ebs_optimized:
params['ebs_optimized'] = ebs_optimized params['ebs_optimized'] = ebs_optimized
if tenancy:
params['tenancy'] = tenancy
if boto_supports_profile_name_arg(ec2): if boto_supports_profile_name_arg(ec2):
params['instance_profile_name'] = instance_profile_name params['instance_profile_name'] = instance_profile_name
else: else:
@ -1148,6 +1178,7 @@ def main():
count_tag = dict(), count_tag = dict(),
volumes = dict(type='list'), volumes = dict(type='list'),
ebs_optimized = dict(type='bool', default=False), ebs_optimized = dict(type='bool', default=False),
tenancy = dict(default='default'),
) )
) )

View file

@ -18,9 +18,9 @@ DOCUMENTATION = '''
--- ---
module: ec2_ami module: ec2_ami
version_added: "1.3" version_added: "1.3"
short_description: create or destroy an image in ec2, return imageid short_description: create or destroy an image in ec2
description: description:
- Creates or deletes ec2 images. This module has a dependency on python-boto >= 2.5 - Creates or deletes ec2 images.
options: options:
instance_id: instance_id:
description: description:
@ -79,7 +79,7 @@ options:
aliases: [] aliases: []
delete_snapshot: delete_snapshot:
description: description:
- Whether or not to deleted an AMI while deregistering it. - Whether or not to delete an AMI while deregistering it.
required: false required: false
default: null default: null
aliases: [] aliases: []
@ -89,13 +89,10 @@ extends_documentation_fragment: aws
''' '''
# Thank you to iAcquire for sponsoring development of this module. # Thank you to iAcquire for sponsoring development of this module.
#
# See http://alestic.com/2011/06/ec2-ami-security for more information about ensuring the security of your AMI.
EXAMPLES = ''' EXAMPLES = '''
# Basic AMI Creation # Basic AMI Creation
- local_action: - ec2_ami:
module: ec2_ami
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx instance_id: i-xxxxxx
@ -104,8 +101,7 @@ EXAMPLES = '''
register: instance register: instance
# Basic AMI Creation, without waiting # Basic AMI Creation, without waiting
- local_action: - ec2_ami:
module: ec2_ami
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx region: xxxxxx
@ -115,22 +111,20 @@ EXAMPLES = '''
register: instance register: instance
# Deregister/Delete AMI # Deregister/Delete AMI
- local_action: - ec2_ami:
module: ec2_ami
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx region: xxxxxx
image_id: ${instance.image_id} image_id: "{{ instance.image_id }}"
delete_snapshot: True delete_snapshot: True
state: absent state: absent
# Deregister AMI # Deregister AMI
- local_action: - ec2_ami:
module: ec2_ami
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx region: xxxxxx
image_id: ${instance.image_id} image_id: "{{ instance.image_id }}"
delete_snapshot: False delete_snapshot: False
state: absent state: absent

View file

@ -16,10 +16,11 @@
# #
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = ''' DOCUMENTATION = '''
--- ---
module: ec2_ami_search module: ec2_ami_search
short_description: Retrieve AWS AMI for a given operating system. short_description: Retrieve AWS AMI information for a given operating system.
version_added: "1.6" version_added: "1.6"
description: description:
- Look up the most recent AMI on AWS for a given operating system. - Look up the most recent AMI on AWS for a given operating system.
@ -56,7 +57,8 @@ options:
required: false required: false
default: us-east-1 default: us-east-1
choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2", choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2",
"eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2", "us-gov-west-1"] "eu-central-1", "eu-west-1", "sa-east-1", "us-east-1",
"us-west-1", "us-west-2", "us-gov-west-1"]
virt: virt:
description: virtualization type description: virtualization type
required: false required: false
@ -88,11 +90,13 @@ SUPPORTED_DISTROS = ['ubuntu']
AWS_REGIONS = ['ap-northeast-1', AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1', 'ap-southeast-1',
'ap-southeast-2', 'ap-southeast-2',
'eu-central-1',
'eu-west-1', 'eu-west-1',
'sa-east-1', 'sa-east-1',
'us-east-1', 'us-east-1',
'us-west-1', 'us-west-1',
'us-west-2'] 'us-west-2',
"us-gov-west-1"]
def get_url(module, url): def get_url(module, url):

72
cloud/ec2_asg.py → cloud/amazon/ec2_asg.py Executable file → Normal file
View file

@ -119,21 +119,23 @@ extends_documentation_fragment: aws
""" """
EXAMPLES = ''' EXAMPLES = '''
A basic example of configuration: # Basic configuration
- ec2_asg: - ec2_asg:
name: special name: special
load_balancers: 'lb1,lb2' load_balancers: [ 'lb1', 'lb2' ]
availability_zones: 'eu-west-1a,eu-west-1b' availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
launch_config_name: 'lc-1' launch_config_name: 'lc-1'
min_size: 1 min_size: 1
max_size: 10 max_size: 10
desired_capacity: 5 desired_capacity: 5
vpc_zone_identifier: 'subnet-abcd1234,subnet-1a2b3c4d' vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
tags: tags:
- environment: production - environment: production
propagate_at_launch: no propagate_at_launch: no
# Rolling ASG Updates
Below is an example of how to assign a new launch config to an ASG and terminate old instances. Below is an example of how to assign a new launch config to an ASG and terminate old instances.
All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
@ -199,7 +201,7 @@ except ImportError:
ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity', ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity',
'health_check_period', 'health_check_type', 'launch_config_name', 'health_check_period', 'health_check_type', 'launch_config_name',
'load_balancers', 'max_size', 'min_size', 'name', 'placement_group', 'load_balancers', 'max_size', 'min_size', 'name', 'placement_group',
'tags', 'termination_policies', 'vpc_zone_identifier') 'termination_policies', 'vpc_zone_identifier')
INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name') INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
@ -245,6 +247,10 @@ def get_properties(autoscaling_group):
properties['pending_instances'] += 1 properties['pending_instances'] += 1
properties['instance_facts'] = instance_facts properties['instance_facts'] = instance_facts
properties['load_balancers'] = autoscaling_group.load_balancers properties['load_balancers'] = autoscaling_group.load_balancers
if getattr(autoscaling_group, "tags", None):
properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags)
return properties return properties
@ -268,8 +274,10 @@ def create_autoscaling_group(connection, module):
region, ec2_url, aws_connect_params = get_aws_connection_info(module) region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try: try:
ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params) ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e: except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e)) module.fail_json(msg=str(e))
elif vpc_zone_identifier:
vpc_zone_identifier = ','.join(vpc_zone_identifier)
asg_tags = [] asg_tags = []
for tag in set_tags: for tag in set_tags:
@ -318,6 +326,8 @@ def create_autoscaling_group(connection, module):
for attr in ASG_ATTRIBUTES: for attr in ASG_ATTRIBUTES:
if module.params.get(attr): if module.params.get(attr):
module_attr = module.params.get(attr) module_attr = module.params.get(attr)
if attr == 'vpc_zone_identifier':
module_attr = ','.join(module_attr)
group_attr = getattr(as_group, attr) group_attr = getattr(as_group, attr)
# we do this because AWS and the module may return the same list # we do this because AWS and the module may return the same list
# sorted differently # sorted differently
@ -357,6 +367,7 @@ def create_autoscaling_group(connection, module):
continue continue
if changed: if changed:
connection.create_or_update_tags(asg_tags) connection.create_or_update_tags(asg_tags)
as_group.tags = asg_tags
# handle loadbalancers separately because None != [] # handle loadbalancers separately because None != []
load_balancers = module.params.get('load_balancers') or [] load_balancers = module.params.get('load_balancers') or []
@ -373,26 +384,6 @@ def create_autoscaling_group(connection, module):
module.fail_json(msg=str(e)) module.fail_json(msg=str(e))
result = as_groups[0]
module.exit_json(changed=changed, name=result.name,
autoscaling_group_arn=result.autoscaling_group_arn,
availability_zones=result.availability_zones,
created_time=str(result.created_time),
default_cooldown=result.default_cooldown,
health_check_period=result.health_check_period,
health_check_type=result.health_check_type,
instance_id=result.instance_id,
instances=[instance.instance_id for instance in result.instances],
launch_config_name=result.launch_config_name,
load_balancers=result.load_balancers,
min_size=result.min_size, max_size=result.max_size,
placement_group=result.placement_group,
wait_timeout = dict(default=300),
tags=result.tags,
termination_policies=result.termination_policies,
vpc_zone_identifier=result.vpc_zone_identifier)
def delete_autoscaling_group(connection, module): def delete_autoscaling_group(connection, module):
group_name = module.params.get('name') group_name = module.params.get('name')
groups = connection.get_all_groups(names=[group_name]) groups = connection.get_all_groups(names=[group_name])
@ -426,12 +417,13 @@ def replace(connection, module):
batch_size = module.params.get('replace_batch_size') batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout') wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('group_name') group_name = module.params.get('name')
max_size = module.params.get('max_size') max_size = module.params.get('max_size')
min_size = module.params.get('min_size') min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity') desired_capacity = module.params.get('desired_capacity')
replace_instances = module.params.get('replace_instances')
# FIXME: we need some more docs about this feature
replace_instances = module.params.get('replace_instances')
# wait for instance list to be populated on a newly provisioned ASG # wait for instance list to be populated on a newly provisioned ASG
instance_wait = time.time() + 30 instance_wait = time.time() + 30
@ -444,7 +436,7 @@ def replace(connection, module):
time.sleep(10) time.sleep(10)
if instance_wait <= time.time(): if instance_wait <= time.time():
# waiting took too long # waiting took too long
module.fail_json(msg = "Waited too for instances to appear. %s" % time.asctime()) module.fail_json(msg = "Waited too long for instances to appear. %s" % time.asctime())
# determine if we need to continue # determine if we need to continue
replaceable = 0 replaceable = 0
if replace_instances: if replace_instances:
@ -470,7 +462,7 @@ def replace(connection, module):
props = get_properties(as_group) props = get_properties(as_group)
if wait_timeout <= time.time(): if wait_timeout <= time.time():
# waiting took too long # waiting took too long
module.fail_json(msg = "Waited too for instances to appear. %s" % time.asctime()) module.fail_json(msg = "Waited too long for instances to appear. %s" % time.asctime())
instances = props['instances'] instances = props['instances']
if replace_instances: if replace_instances:
instances = replace_instances instances = replace_instances
@ -490,7 +482,7 @@ def replace(connection, module):
def replace_batch(connection, module, replace_instances): def replace_batch(connection, module, replace_instances):
group_name = module.params.get('group_name') group_name = module.params.get('name')
wait_timeout = int(module.params.get('wait_timeout')) wait_timeout = int(module.params.get('wait_timeout'))
lc_check = module.params.get('lc_check') lc_check = module.params.get('lc_check')
@ -567,7 +559,7 @@ def main():
min_size=dict(type='int'), min_size=dict(type='int'),
max_size=dict(type='int'), max_size=dict(type='int'),
desired_capacity=dict(type='int'), desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='str'), vpc_zone_identifier=dict(type='list'),
replace_batch_size=dict(type='int', default=1), replace_batch_size=dict(type='int', default=1),
replace_all_instances=dict(type='bool', default=False), replace_all_instances=dict(type='bool', default=False),
replace_instances=dict(type='list', default=[]), replace_instances=dict(type='list', default=[]),
@ -577,9 +569,13 @@ def main():
tags=dict(type='list', default=[]), tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300), health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
),
) )
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [['replace_all_instances', 'replace_instances']]
) )
module = AnsibleModule(argument_spec=argument_spec)
state = module.params.get('state') state = module.params.get('state')
replace_instances = module.params.get('replace_instances') replace_instances = module.params.get('replace_instances')
@ -591,16 +587,16 @@ def main():
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region)) module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound, e: except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e)) module.fail_json(msg=str(e))
changed = False changed = create_changed = replace_changed = False
if replace_all_instances and replace_instances:
module.fail_json(msg="You can't use replace_instances and replace_all_instances in the same task.")
if state == 'present': if state == 'present':
create_changed, asg_properties=create_autoscaling_group(connection, module) create_changed, asg_properties=create_autoscaling_group(connection, module)
if replace_all_instances or replace_instances:
replace_changed, asg_properties=replace(connection, module)
elif state == 'absent': elif state == 'absent':
changed = delete_autoscaling_group(connection, module) changed = delete_autoscaling_group(connection, module)
module.exit_json( changed = changed ) module.exit_json( changed = changed )
if replace_all_instances or replace_instances:
replace_changed, asg_properties=replace(connection, module)
if create_changed or replace_changed: if create_changed or replace_changed:
changed = True changed = True
module.exit_json( changed = changed, **asg_properties ) module.exit_json( changed = changed, **asg_properties )

View file

@ -69,13 +69,13 @@ EXAMPLES = '''
ec2_eip: instance_id=i-1212f003 ec2_eip: instance_id=i-1212f003
- name: allocate a new elastic IP without associating it to anything - name: allocate a new elastic IP without associating it to anything
ec2_eip: action: ec2_eip
register: eip register: eip
- name: output the IP - name: output the IP
debug: msg="Allocated IP is {{ eip.public_ip }}" debug: msg="Allocated IP is {{ eip.public_ip }}"
- name: provision new instances with ec2 - name: provision new instances with ec2
ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes group=webserver count=3 ec2: keypair=mykey instance_type=c1.medium image=ami-40603AD1 wait=yes group=webserver count=3
register: ec2 register: ec2
- name: associate new elastic IPs with each of the instances - name: associate new elastic IPs with each of the instances
ec2_eip: "instance_id={{ item }}" ec2_eip: "instance_id={{ item }}"

View file

@ -80,18 +80,18 @@ EXAMPLES = """
# basic pre_task and post_task example # basic pre_task and post_task example
pre_tasks: pre_tasks:
- name: Gathering ec2 facts - name: Gathering ec2 facts
ec2_facts: action: ec2_facts
- name: Instance De-register - name: Instance De-register
local_action: ec2_elb local_action:
args: module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
state: 'absent' state: 'absent'
roles: roles:
- myrole - myrole
post_tasks: post_tasks:
- name: Instance Register - name: Instance Register
local_action: ec2_elb local_action:
args: module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}" ec2_elbs: "{{ item }}"
state: 'present' state: 'present'
@ -258,7 +258,7 @@ class ElbManager:
try: try:
elb = connect_to_aws(boto.ec2.elb, self.region, elb = connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params) **self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e: except (boto.exception.NoAuthHandlerFound, StandardError), e:
self.module.fail_json(msg=str(e)) self.module.fail_json(msg=str(e))
elbs = elb.get_all_load_balancers() elbs = elb.get_all_load_balancers()
@ -278,7 +278,7 @@ class ElbManager:
try: try:
ec2 = connect_to_aws(boto.ec2, self.region, ec2 = connect_to_aws(boto.ec2, self.region,
**self.aws_connect_params) **self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e: except (boto.exception.NoAuthHandlerFound, StandardError), e:
self.module.fail_json(msg=str(e)) self.module.fail_json(msg=str(e))
return ec2.get_only_instances(instance_ids=[self.instance_id])[0] return ec2.get_only_instances(instance_ids=[self.instance_id])[0]

View file

@ -115,7 +115,8 @@ EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region. # Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set. # It is assumed that their matching environment variables are set.
# Basic provisioning example # Basic provisioning example (non-VPC)
- local_action: - local_action:
module: ec2_elb_lb module: ec2_elb_lb
name: "test-please-delete" name: "test-please-delete"
@ -134,8 +135,8 @@ EXAMPLES = """
# ssl certificate required for https or ssl # ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert" ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Internal ELB example
# Basic VPC provisioning example
- local_action: - local_action:
module: ec2_elb_lb module: ec2_elb_lb
name: "test-vpc" name: "test-vpc"
@ -214,7 +215,7 @@ EXAMPLES = """
name: 'New ELB' name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890' security_group_ids: 'sg-123456, sg-67890'
region: us-west-2 region: us-west-2
subnets: 'subnet-123456, subnet-67890' subnets: 'subnet-123456,subnet-67890'
purge_subnets: yes purge_subnets: yes
listeners: listeners:
- protocol: http - protocol: http
@ -374,7 +375,7 @@ class ElbManager(object):
try: try:
return connect_to_aws(boto.ec2.elb, self.region, return connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params) **self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e: except (boto.exception.NoAuthHandlerFound, StandardError), e:
self.module.fail_json(msg=str(e)) self.module.fail_json(msg=str(e))
def _delete_elb(self): def _delete_elb(self):

View file

@ -34,8 +34,6 @@ description:
- This module fetches data from the metadata servers in ec2 (aws) as per - This module fetches data from the metadata servers in ec2 (aws) as per
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html. http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html.
The module must be called from within the EC2 instance itself. The module must be called from within the EC2 instance itself.
Eucalyptus cloud provides a similar service and this module should
work with this cloud provider as well.
notes: notes:
- Parameters to filter on ec2_facts may be added later. - Parameters to filter on ec2_facts may be added later.
author: "Silviu Dicu <silviudicu@gmail.com>" author: "Silviu Dicu <silviudicu@gmail.com>"
@ -65,6 +63,7 @@ class Ec2Metadata(object):
AWS_REGIONS = ('ap-northeast-1', AWS_REGIONS = ('ap-northeast-1',
'ap-southeast-1', 'ap-southeast-1',
'ap-southeast-2', 'ap-southeast-2',
'eu-central-1',
'eu-west-1', 'eu-west-1',
'sa-east-1', 'sa-east-1',
'us-east-1', 'us-east-1',

View file

@ -55,7 +55,7 @@ options:
purge_rules_egress: purge_rules_egress:
version_added: "1.8" version_added: "1.8"
description: description:
- Purge existing rules_egree on security group that are not found in rules_egress - Purge existing rules_egress on security group that are not found in rules_egress
required: false required: false
default: 'true' default: 'true'
aliases: [] aliases: []
@ -70,8 +70,7 @@ notes:
EXAMPLES = ''' EXAMPLES = '''
- name: example ec2 group - name: example ec2 group
local_action: ec2_group:
module: ec2_group
name: example name: example
description: an example EC2 group description: an example EC2 group
vpc_id: 12345 vpc_id: 12345
@ -102,6 +101,7 @@ EXAMPLES = '''
- proto: tcp - proto: tcp
from_port: 80 from_port: 80
to_port: 80 to_port: 80
cidr_ip: 0.0.0.0/0
group_name: example-other group_name: example-other
# description to use if example-other needs to be created # description to use if example-other needs to be created
group_desc: other example EC2 group group_desc: other example EC2 group
@ -114,11 +114,21 @@ except ImportError:
sys.exit(1) sys.exit(1)
def make_rule_key(prefix, rule, group_id, cidr_ip):
"""Creates a unique key for an individual group rule"""
if isinstance(rule, dict):
proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')]
else: # isinstance boto.ec2.securitygroup.IPPermissions
proto, from_port, to_port = [getattr(rule, x, None) for x in ('ip_protocol', 'from_port', 'to_port')]
key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip)
return key.lower().replace('-none', '-None')
def addRulesToLookup(rules, prefix, dict): def addRulesToLookup(rules, prefix, dict):
for rule in rules: for rule in rules:
for grant in rule.grants: for grant in rule.grants:
dict["%s-%s-%s-%s-%s-%s" % (prefix, rule.ip_protocol, rule.from_port, rule.to_port, dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = rule
grant.group_id, grant.cidr_ip)] = rule
def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
@ -279,7 +289,7 @@ def main():
rule['to_port'] = None rule['to_port'] = None
# If rule already exists, don't later delete it # If rule already exists, don't later delete it
ruleId = "%s-%s-%s-%s-%s-%s" % ('in', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip) ruleId = make_rule_key('in', rule, group_id, ip)
if ruleId in groupRules: if ruleId in groupRules:
del groupRules[ruleId] del groupRules[ruleId]
# Otherwise, add new rule # Otherwise, add new rule
@ -320,7 +330,7 @@ def main():
rule['to_port'] = None rule['to_port'] = None
# If rule already exists, don't later delete it # If rule already exists, don't later delete it
ruleId = "%s-%s-%s-%s-%s-%s" % ('out', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip) ruleId = make_rule_key('out', rule, group_id, ip)
if ruleId in groupRules: if ruleId in groupRules:
del groupRules[ruleId] del groupRules[ruleId]
# Otherwise, add new rule # Otherwise, add new rule

View file

@ -56,15 +56,13 @@ EXAMPLES = '''
# Creates a new ec2 key pair named `example` if not present, returns generated # Creates a new ec2 key pair named `example` if not present, returns generated
# private key # private key
- name: example ec2 key - name: example ec2 key
local_action: ec2_key:
module: ec2_key
name: example name: example
# Creates a new ec2 key pair named `example` if not present using provided key # Creates a new ec2 key pair named `example` if not present using provided key
# material # material. This could use the 'file' lookup plugin to pull this off disk.
- name: example2 ec2 key - name: example2 ec2 key
local_action: ec2_key:
module: ec2_key
name: example2 name: example2
key_material: 'ssh-rsa AAAAxyz...== me@example.com' key_material: 'ssh-rsa AAAAxyz...== me@example.com'
state: present state: present
@ -72,16 +70,14 @@ EXAMPLES = '''
# Creates a new ec2 key pair named `example` if not present using provided key # Creates a new ec2 key pair named `example` if not present using provided key
# material # material
- name: example3 ec2 key - name: example3 ec2 key
local_action: ec2_key:
module: ec2_key
name: example3 name: example3
key_material: "{{ item }}" key_material: "{{ item }}"
with_file: /path/to/public_key.id_rsa.pub with_file: /path/to/public_key.id_rsa.pub
# Removes ec2 key pair by name # Removes ec2 key pair by name
- name: remove example key - name: remove example key
local_action: ec2_key:
module: ec2_key
name: example name: example
state: absent state: absent
''' '''

7
cloud/ec2_lc.py → cloud/amazon/ec2_lc.py Executable file → Normal file
View file

@ -93,7 +93,6 @@ options:
description: description:
- Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC. - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC.
required: false required: false
default: false
aliases: [] aliases: []
version_added: "1.8" version_added: "1.8"
ramdisk_id: ramdisk_id:
@ -125,7 +124,7 @@ EXAMPLES = '''
name: special name: special
image_id: ami-XXX image_id: ami-XXX
key_name: default key_name: default
security_groups: 'group,group2' security_groups: ['group', 'group2' ]
instance_type: t1.micro instance_type: t1.micro
''' '''
@ -255,7 +254,7 @@ def main():
ebs_optimized=dict(default=False, type='bool'), ebs_optimized=dict(default=False, type='bool'),
associate_public_ip_address=dict(type='bool'), associate_public_ip_address=dict(type='bool'),
instance_monitoring=dict(default=False, type='bool'), instance_monitoring=dict(default=False, type='bool'),
assign_public_ip=dict(default=False, type='bool') assign_public_ip=dict(type='bool')
) )
) )
@ -265,7 +264,7 @@ def main():
try: try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e: except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e)) module.fail_json(msg=str(e))
state = module.params.get('state') state = module.params.get('state')

View file

@ -271,7 +271,7 @@ def main():
region, ec2_url, aws_connect_params = get_aws_connection_info(module) region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try: try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params) connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e: except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e)) module.fail_json(msg=str(e))
if state == 'present': if state == 'present':

View file

@ -163,9 +163,7 @@ def main():
try: try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
if not connection: except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e)) module.fail_json(msg = str(e))
if state == 'present': if state == 'present':

View file

@ -48,6 +48,32 @@ options:
- a hash/dictionary of tags to add to the snapshot - a hash/dictionary of tags to add to the snapshot
required: false required: false
version_added: "1.6" version_added: "1.6"
wait:
description:
- wait for the snapshot to be ready
choices: ['yes', 'no']
required: false
default: yes
version_added: "1.5.1"
wait_timeout:
description:
- how long before wait gives up, in seconds
- specify 0 to wait forever
required: false
default: 0
version_added: "1.5.1"
state:
description:
- whether to add or create a snapshot
required: false
default: present
choices: ['absent', 'present']
version_added: "1.9"
snapshot_id:
description:
- snapshot id to remove
required: false
version_added: "1.9"
author: Will Thames author: Will Thames
extends_documentation_fragment: aws extends_documentation_fragment: aws
@ -55,26 +81,29 @@ extends_documentation_fragment: aws
EXAMPLES = ''' EXAMPLES = '''
# Simple snapshot of volume using volume_id # Simple snapshot of volume using volume_id
- local_action: - ec2_snapshot:
module: ec2_snapshot
volume_id: vol-abcdef12 volume_id: vol-abcdef12
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32 description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume mounted on device_name attached to instance_id # Snapshot of volume mounted on device_name attached to instance_id
- local_action: - ec2_snapshot:
module: ec2_snapshot
instance_id: i-12345678 instance_id: i-12345678
device_name: /dev/sdb1 device_name: /dev/sdb1
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32 description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume with tagging # Snapshot of volume with tagging
- local_action: - ec2_snapshot:
module: ec2_snapshot
instance_id: i-12345678 instance_id: i-12345678
device_name: /dev/sdb1 device_name: /dev/sdb1
snapshot_tags: snapshot_tags:
frequency: hourly frequency: hourly
source: /data source: /data
# Remove a snapshot
- local_action:
module: ec2_snapshot
snapshot_id: snap-abcd1234
state: absent
''' '''
import sys import sys
@ -93,24 +122,28 @@ def main():
volume_id = dict(), volume_id = dict(),
description = dict(), description = dict(),
instance_id = dict(), instance_id = dict(),
snapshot_id = dict(),
device_name = dict(), device_name = dict(),
wait = dict(type='bool', default='true'), wait = dict(type='bool', default='true'),
wait_timeout = dict(default=0), wait_timeout = dict(default=0),
snapshot_tags = dict(type='dict', default=dict()), snapshot_tags = dict(type='dict', default=dict()),
state = dict(choices=['absent','present'], default='present'),
) )
) )
module = AnsibleModule(argument_spec=argument_spec) module = AnsibleModule(argument_spec=argument_spec)
volume_id = module.params.get('volume_id') volume_id = module.params.get('volume_id')
snapshot_id = module.params.get('snapshot_id')
description = module.params.get('description') description = module.params.get('description')
instance_id = module.params.get('instance_id') instance_id = module.params.get('instance_id')
device_name = module.params.get('device_name') device_name = module.params.get('device_name')
wait = module.params.get('wait') wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout') wait_timeout = module.params.get('wait_timeout')
snapshot_tags = module.params.get('snapshot_tags') snapshot_tags = module.params.get('snapshot_tags')
state = module.params.get('state')
if not volume_id and not instance_id or volume_id and instance_id: if not volume_id and not instance_id and not snapshot_id or volume_id and instance_id and snapshot_id:
module.fail_json('One and only one of volume_id or instance_id must be specified') module.fail_json('One and only one of volume_id or instance_id or snapshot_id must be specified')
if instance_id and not device_name or device_name and not instance_id: if instance_id and not device_name or device_name and not instance_id:
module.fail_json('Instance ID and device name must both be specified') module.fail_json('Instance ID and device name must both be specified')
@ -125,6 +158,20 @@ def main():
except boto.exception.BotoServerError, e: except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if state == 'absent':
if not snapshot_id:
module.fail_json(msg = 'snapshot_id must be set when state is absent')
try:
snapshots = ec2.get_all_snapshots([snapshot_id])
ec2.delete_snapshot(snapshot_id)
module.exit_json(changed=True)
except boto.exception.BotoServerError, e:
# exception is raised if snapshot does not exist
if e.error_code == 'InvalidSnapshot.NotFound':
module.exit_json(changed=False)
else:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
try: try:
snapshot = ec2.create_snapshot(volume_id, description=description) snapshot = ec2.create_snapshot(volume_id, description=description)
time_waited = 0 time_waited = 0

View file

@ -50,7 +50,7 @@ EXAMPLES = '''
# Basic example of adding tag(s) # Basic example of adding tag(s)
tasks: tasks:
- name: tag a resource - name: tag a resource
local_action: ec2_tag resource=vol-XXXXXX region=eu-west-1 state=present ec2_tag: resource=vol-XXXXXX region=eu-west-1 state=present
args: args:
tags: tags:
Name: ubervol Name: ubervol
@ -59,11 +59,11 @@ tasks:
# Playbook example of adding tag(s) to spawned instances # Playbook example of adding tag(s) to spawned instances
tasks: tasks:
- name: launch some instances - name: launch some instances
local_action: ec2 keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image_id }} wait=true region=eu-west-1 ec2: keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image_id }} wait=true region=eu-west-1
register: ec2 register: ec2
- name: tag my launched instances - name: tag my launched instances
local_action: ec2_tag resource={{ item.id }} region=eu-west-1 state=present ec2_tag: resource={{ item.id }} region=eu-west-1 state=present
with_items: ec2.instances with_items: ec2.instances
args: args:
tags: tags:
@ -71,11 +71,6 @@ tasks:
env: prod env: prod
''' '''
# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes.
# if state=present and it doesn't exist, create, tag and attach.
# Check for state by looking for volume attachment with tag (and against block device mapping?).
# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3).
import sys import sys
import time import time

View file

@ -48,6 +48,14 @@ options:
required: false required: false
default: null default: null
aliases: [] aliases: []
volume_type:
description:
- Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default
and continues to remain the Ansible default for backwards compatibility.
required: false
default: standard
aliases: []
version_added: "1.9"
iops: iops:
description: description:
- the provisioned IOPs you want to associate with this volume (integer). - the provisioned IOPs you want to associate with this volume (integer).
@ -105,36 +113,31 @@ extends_documentation_fragment: aws
EXAMPLES = ''' EXAMPLES = '''
# Simple attachment action # Simple attachment action
- local_action: - ec2_vol:
module: ec2_vol
instance: XXXXXX instance: XXXXXX
volume_size: 5 volume_size: 5
device_name: sdd device_name: sdd
# Example using custom iops params # Example using custom iops params
- local_action: - ec2_vol:
module: ec2_vol
instance: XXXXXX instance: XXXXXX
volume_size: 5 volume_size: 5
iops: 200 iops: 200
device_name: sdd device_name: sdd
# Example using snapshot id # Example using snapshot id
- local_action: - ec2_vol:
module: ec2_vol
instance: XXXXXX instance: XXXXXX
snapshot: "{{ snapshot }}" snapshot: "{{ snapshot }}"
# Playbook example combined with instance launch # Playbook example combined with instance launch
- local_action: - ec2:
module: ec2
keypair: "{{ keypair }}" keypair: "{{ keypair }}"
image: "{{ image }}" image: "{{ image }}"
wait: yes wait: yes
count: 3 count: 3
register: ec2 register: ec2
- local_action: - ec2_vol:
module: ec2_vol
instance: "{{ item.id }} " instance: "{{ item.id }} "
volume_size: 5 volume_size: 5
with_items: ec2.instances with_items: ec2.instances
@ -144,8 +147,7 @@ EXAMPLES = '''
# * Nothing will happen if the volume is already attached. # * Nothing will happen if the volume is already attached.
# * Volume must exist in the same zone. # * Volume must exist in the same zone.
- local_action: - ec2:
module: ec2
keypair: "{{ keypair }}" keypair: "{{ keypair }}"
image: "{{ image }}" image: "{{ image }}"
zone: YYYYYY zone: YYYYYY
@ -154,8 +156,7 @@ EXAMPLES = '''
count: 1 count: 1
register: ec2 register: ec2
- local_action: - ec2_vol:
module: ec2_vol
instance: "{{ item.id }}" instance: "{{ item.id }}"
name: my_existing_volume_Name_tag name: my_existing_volume_Name_tag
device_name: /dev/xvdf device_name: /dev/xvdf
@ -163,22 +164,27 @@ EXAMPLES = '''
register: ec2_vol register: ec2_vol
# Remove a volume # Remove a volume
- local_action: - ec2_vol:
module: ec2_vol
id: vol-XXXXXXXX id: vol-XXXXXXXX
state: absent state: absent
# Detach a volume
- ec2_vol:
id: vol-XXXXXXXX
instance: None
# List volumes for an instance # List volumes for an instance
- local_action: - ec2_vol:
module: ec2_vol
instance: i-XXXXXX instance: i-XXXXXX
state: list state: list
'''
# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes. # Create new volume using SSD storage
# if state=present and it doesn't exist, create, tag and attach. - ec2_vol:
# Check for state by looking for volume attachment with tag (and against block device mapping?). instance: XXXXXX
# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3). volume_size: 50
volume_type: gp2
device_name: /dev/xvdf
'''
import sys import sys
import time import time
@ -253,22 +259,24 @@ def create_volume(module, ec2, zone):
iops = module.params.get('iops') iops = module.params.get('iops')
encrypted = module.params.get('encrypted') encrypted = module.params.get('encrypted')
volume_size = module.params.get('volume_size') volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
snapshot = module.params.get('snapshot') snapshot = module.params.get('snapshot')
# If custom iops is defined we use volume_type "io1" rather than the default of "standard" # If custom iops is defined we use volume_type "io1" rather than the default of "standard"
if iops: if iops:
volume_type = 'io1' volume_type = 'io1'
else:
volume_type = 'standard' if instance == 'None' or instance == '':
instance = None
# If no instance supplied, try volume creation based on module parameters. # If no instance supplied, try volume creation based on module parameters.
if name or id: if name or id:
if not instance:
module.fail_json(msg = "If name or id is specified, instance must also be specified")
if iops or volume_size: if iops or volume_size:
module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]") module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]")
volume = get_volume(module, ec2) volume = get_volume(module, ec2)
if volume.attachment_state() is not None: if volume.attachment_state() is not None:
if instance is None:
return volume
adata = volume.attach_data adata = volume.attach_data
if adata.instance_id != instance: if adata.instance_id != instance:
module.fail_json(msg = "Volume %s is already attached to another instance: %s" module.fail_json(msg = "Volume %s is already attached to another instance: %s"
@ -330,6 +338,13 @@ def attach_volume(module, ec2, volume, instance):
except boto.exception.BotoServerError, e: except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
def detach_volume(module, ec2):
vol = get_volume(module, ec2)
if not vol or vol.attachment_state() is None:
module.exit_json(changed=False)
else:
vol.detach()
module.exit_json(changed=True)
def main(): def main():
argument_spec = ec2_argument_spec() argument_spec = ec2_argument_spec()
@ -338,6 +353,7 @@ def main():
id = dict(), id = dict(),
name = dict(), name = dict(),
volume_size = dict(), volume_size = dict(),
volume_type = dict(choices=['standard', 'gp2', 'io1'], default='standard'),
iops = dict(), iops = dict(),
encrypted = dict(), encrypted = dict(),
device_name = dict(), device_name = dict(),
@ -352,6 +368,7 @@ def main():
name = module.params.get('name') name = module.params.get('name')
instance = module.params.get('instance') instance = module.params.get('instance')
volume_size = module.params.get('volume_size') volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
iops = module.params.get('iops') iops = module.params.get('iops')
encrypted = module.params.get('encrypted') encrypted = module.params.get('encrypted')
device_name = module.params.get('device_name') device_name = module.params.get('device_name')
@ -359,6 +376,9 @@ def main():
snapshot = module.params.get('snapshot') snapshot = module.params.get('snapshot')
state = module.params.get('state') state = module.params.get('state')
if instance == 'None' or instance == '':
instance = None
ec2 = ec2_connect(module) ec2 = ec2_connect(module)
if state == 'list': if state == 'list':
@ -425,7 +445,9 @@ def main():
volume = create_volume(module, ec2, zone) volume = create_volume(module, ec2, zone)
if instance: if instance:
attach_volume(module, ec2, volume, inst) attach_volume(module, ec2, volume, inst)
module.exit_json(volume_id=volume.id, device=device_name) else:
detach_volume(module, ec2)
module.exit_json(volume_id=volume.id, device=device_name, volume_type=volume.type)
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import *

View file

@ -130,16 +130,14 @@ EXAMPLES = '''
# It is assumed that their matching environment variables are set. # It is assumed that their matching environment variables are set.
# Basic creation example: # Basic creation example:
local_action: ec2_vpc:
module: ec2_vpc
state: present state: present
cidr_block: 172.23.0.0/16 cidr_block: 172.23.0.0/16
resource_tags: { "Environment":"Development" } resource_tags: { "Environment":"Development" }
region: us-west-2 region: us-west-2
# Full creation example with subnets and optional availability zones. # Full creation example with subnets and optional availability zones.
# The absence or presence of subnets deletes or creates them respectively. # The absence or presence of subnets deletes or creates them respectively.
local_action: ec2_vpc:
module: ec2_vpc
state: present state: present
cidr_block: 172.22.0.0/16 cidr_block: 172.22.0.0/16
resource_tags: { "Environment":"Development" } resource_tags: { "Environment":"Development" }
@ -170,8 +168,7 @@ EXAMPLES = '''
register: vpc register: vpc
# Removal of a VPC by id # Removal of a VPC by id
local_action: ec2_vpc:
module: ec2_vpc
state: absent state: absent
vpc_id: vpc-aaaaaaa vpc_id: vpc-aaaaaaa
region: us-west-2 region: us-west-2

View file

@ -111,8 +111,7 @@ EXAMPLES = """
# It is assumed that their matching environment variables are set. # It is assumed that their matching environment variables are set.
# Basic example # Basic example
- local_action: - elasticache:
module: elasticache
name: "test-please-delete" name: "test-please-delete"
state: present state: present
engine: memcached engine: memcached
@ -126,14 +125,12 @@ EXAMPLES = """
# Ensure cache cluster is gone # Ensure cache cluster is gone
- local_action: - elasticache:
module: elasticache
name: "test-please-delete" name: "test-please-delete"
state: absent state: absent
# Reboot cache cluster # Reboot cache cluster
- local_action: - elasticache:
module: elasticache
name: "test-please-delete" name: "test-please-delete"
state: rebooted state: rebooted

View file

@ -224,44 +224,45 @@ requirements: [ "boto" ]
author: Bruce Pennypacker author: Bruce Pennypacker
''' '''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
EXAMPLES = ''' EXAMPLES = '''
# Basic mysql provisioning example # Basic mysql provisioning example
- rds: > - rds:
command=create command: create
instance_name=new_database instance_name: new_database
db_engine=MySQL db_engine: MySQL
size=10 size: 10
instance_type=db.m1.small instance_type: db.m1.small
username=mysql_admin username: mysql_admin
password=1nsecure password: 1nsecure
# Create a read-only replica and wait for it to become available # Create a read-only replica and wait for it to become available
- rds: > - rds:
command=replicate command: replicate
instance_name=new_database_replica instance_name: new_database_replica
source_instance=new_database source_instance: new_database
wait=yes wait: yes
wait_timeout=600 wait_timeout: 600
# Delete an instance, but create a snapshot before doing so # Delete an instance, but create a snapshot before doing so
- rds: > - rds:
command=delete command: delete
instance_name=new_database instance_name: new_database
snapshot=new_database_snapshot snapshot: new_database_snapshot
# Get facts about an instance # Get facts about an instance
- rds: > - rds:
command=facts command: facts
instance_name=new_database instance_name: new_database
register: new_database_facts register: new_database_facts
# Rename an instance and wait for the change to take effect # Rename an instance and wait for the change to take effect
- rds: > - rds:
command=modify command: modify
instance_name=new_database instance_name: new_database
new_instance_name=renamed_database new_instance_name: renamed_database
wait=yes wait: yes
''' '''
import sys import sys

View file

@ -85,17 +85,18 @@ author: Scott Anderson
EXAMPLES = ''' EXAMPLES = '''
# Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024 # Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
- rds_param_group: > - rds_param_group:
state=present state: present
name=norwegian_blue name: norwegian_blue
description=My Fancy Ex Parrot Group description: 'My Fancy Ex Parrot Group'
engine=mysql5.6 engine: 'mysql5.6'
params='{"auto_increment_increment": "42K"}' params:
auto_increment_increment: "42K"
# Remove a parameter group # Remove a parameter group
- rds_param_group: > - rds_param_group:
state=absent state: absent
name=norwegian_blue name: norwegian_blue
''' '''
import sys import sys

View file

@ -71,8 +71,7 @@ author: Scott Anderson
EXAMPLES = ''' EXAMPLES = '''
# Add or change a subnet group # Add or change a subnet group
- local_action: - rds_subnet_group
module: rds_subnet_group
state: present state: present
name: norwegian-blue name: norwegian-blue
description: My Fancy Ex Parrot Subnet Group description: My Fancy Ex Parrot Subnet Group
@ -80,10 +79,10 @@ EXAMPLES = '''
- subnet-aaaaaaaa - subnet-aaaaaaaa
- subnet-bbbbbbbb - subnet-bbbbbbbb
# Remove a parameter group # Remove a subnet group
- rds_param_group: > - rds_subnet_group:
state=absent state: absent
name=norwegian-blue name: norwegian-blue
''' '''
import sys import sys

View file

@ -88,51 +88,54 @@ requirements: [ "boto" ]
author: Bruce Pennypacker author: Bruce Pennypacker
''' '''
# FIXME: the command stuff should have a more state like configuration alias -- MPD
EXAMPLES = ''' EXAMPLES = '''
# Add new.foo.com as an A record with 3 IPs # Add new.foo.com as an A record with 3 IPs
- route53: > - route53:
command=create command: create
zone=foo.com zone: foo.com
record=new.foo.com record: new.foo.com
type=A type: A
ttl=7200 ttl: 7200
value=1.1.1.1,2.2.2.2,3.3.3.3 value: 1.1.1.1,2.2.2.2,3.3.3.3
# Retrieve the details for new.foo.com # Retrieve the details for new.foo.com
- route53: > - route53:
command=get command: get
zone=foo.com zone: foo.com
record=new.foo.com record: new.foo.com
type=A type: A
register: rec register: rec
# Delete new.foo.com A record using the results from the get command # Delete new.foo.com A record using the results from the get command
- route53: > - route53:
command=delete command: delete
zone=foo.com zone: foo.com
record={{ rec.set.record }} record: "{{ rec.set.record }}"
type={{ rec.set.type }} ttl: "{{ rec.set.ttl }}"
value={{ rec.set.value }} type: "{{ rec.set.type }}"
value: "{{ rec.set.value }}"
# Add an AAAA record. Note that because there are colons in the value # Add an AAAA record. Note that because there are colons in the value
# that the entire parameter list must be quoted: # that the entire parameter list must be quoted:
- route53: > - route53:
command=create command: "create"
zone=foo.com zone: "foo.com"
record=localhost.foo.com record: "localhost.foo.com"
type=AAAA type: "AAAA"
ttl=7200 ttl: "7200"
value="::1" value: "::1"
# Add a TXT record. Note that TXT and SPF records must be surrounded # Add a TXT record. Note that TXT and SPF records must be surrounded
# by quotes when sent to Route 53: # by quotes when sent to Route 53:
- route53: > - route53:
command=create command: "create"
zone=foo.com zone: "foo.com"
record=localhost.foo.com record: "localhost.foo.com"
type=TXT type: "TXT"
ttl=7200 ttl: "7200"
value="\"bar\"" value: '"bar"'
''' '''
@ -160,7 +163,7 @@ def commit(changes, retry_interval):
code = code.split("</Code>")[0] code = code.split("</Code>")[0]
if code != 'PriorRequestNotComplete' or retry < 0: if code != 'PriorRequestNotComplete' or retry < 0:
raise e raise e
time.sleep(retry_interval) time.sleep(float(retry_interval))
def main(): def main():
argument_spec = ec2_argument_spec() argument_spec = ec2_argument_spec()

View file

@ -68,7 +68,7 @@ options:
aliases: [] aliases: []
s3_url: s3_url:
description: description:
- "S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. Ansible tries to guess if fakes3 (https://github.com/jubos/fake-s3) or Eucalyptus Walrus (https://github.com/eucalyptus/eucalyptus/wiki/Walrus) is used and configure connection accordingly. Current heuristic is: everything with scheme fakes3:// is fakes3, everything else not ending with amazonaws.com is Walrus." - "S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS"
default: null default: null
aliases: [ S3_URL ] aliases: [ S3_URL ]
aws_secret_key: aws_secret_key:
@ -103,28 +103,19 @@ author: Lester Wade, Ralph Tice
EXAMPLES = ''' EXAMPLES = '''
# Simple PUT operation # Simple PUT operation
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
# Simple GET operation # Simple GET operation
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get - s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get
# GET/download and overwrite local file (trust remote)
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get
# GET/download and do not overwrite local file (trust remote)
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get force=false
# PUT/upload and overwrite remote file (trust local)
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
# PUT/upload with metadata # PUT/upload with metadata
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip'
# PUT/upload with multiple metadata
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache' - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
# PUT/upload and do not overwrite remote file (trust local)
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false
# Download an object as a string to use else where in your playbook
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=getstr
# Create an empty bucket # Create an empty bucket
- s3: bucket=mybucket mode=create - s3: bucket=mybucket mode=create
# Create a bucket with key as directory
- s3: bucket=mybucket object=/my/directory/path mode=create # Create a bucket with key as directory, in the EU region
# Create an empty bucket in the EU region - s3: bucket=mybucket object=/my/directory/path mode=create region=eu-west-1
- s3: bucket=mybucket mode=create region=eu-west-1
# Delete a bucket and all contents # Delete a bucket and all contents
- s3: bucket=mybucket mode=delete - s3: bucket=mybucket mode=delete
''' '''

0
cloud/azure/__init__.py Normal file
View file

View file

View file

@ -27,7 +27,7 @@ options:
description: description:
- Indicate desired state of the target. - Indicate desired state of the target.
default: present default: present
choices: ['present', 'active', 'absent', 'deleted'] choices: ['present', 'absent']
client_id: client_id:
description: description:
- DigitalOcean manager id. - DigitalOcean manager id.
@ -217,7 +217,7 @@ def core(module):
def main(): def main():
module = AnsibleModule( module = AnsibleModule(
argument_spec = dict( argument_spec = dict(
state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'), state = dict(choices=['present', 'absent'], default='present'),
client_id = dict(aliases=['CLIENT_ID'], no_log=True), client_id = dict(aliases=['CLIENT_ID'], no_log=True),
api_key = dict(aliases=['API_KEY'], no_log=True), api_key = dict(aliases=['API_KEY'], no_log=True),
name = dict(type='str'), name = dict(type='str'),

0
cloud/docker/__init__.py Normal file
View file

View file

@ -23,6 +23,7 @@
DOCUMENTATION = ''' DOCUMENTATION = '''
--- ---
module: docker_image module: docker_image
deprecated: "functions are being rolled into the 'docker' module"
author: Pavel Antonov author: Pavel Antonov
version_added: "1.5" version_added: "1.5"
short_description: manage docker images short_description: manage docker images

View file

@ -126,6 +126,12 @@ options:
required: false required: false
default: null default: null
aliases: [] aliases: []
email:
description:
- Set remote API email
required: false
default: null
aliases: []
hostname: hostname:
description: description:
- Set container hostname - Set container hostname
@ -204,6 +210,27 @@ options:
default: '' default: ''
aliases: [] aliases: []
version_added: "1.8" version_added: "1.8"
restart_policy:
description:
- Set the container restart policy
required: false
default: false
aliases: []
version_added: "1.9"
restart_policy_retry:
description:
- Set the retry limit for container restart policy
required: false
default: false
aliases: []
version_added: "1.9"
insecure_registry:
description:
- Use insecure private registry by HTTP instead of HTTPS (needed for docker-py >= 0.5.0).
required: false
default: false
aliases: []
version_added: "1.9"
author: Cove Schneider, Joshua Conner, Pavel Antonov author: Cove Schneider, Joshua Conner, Pavel Antonov
requirements: [ "docker-py >= 0.3.0", "docker >= 0.10.0" ] requirements: [ "docker-py >= 0.3.0", "docker >= 0.10.0" ]
@ -336,9 +363,10 @@ try:
except ImportError, e: except ImportError, e:
HAS_DOCKER_PY = False HAS_DOCKER_PY = False
try: if HAS_DOCKER_PY:
try:
from docker.errors import APIError as DockerAPIError from docker.errors import APIError as DockerAPIError
except ImportError: except ImportError:
from docker.client import APIError as DockerAPIError from docker.client import APIError as DockerAPIError
@ -369,9 +397,81 @@ def _docker_id_quirk(inspect):
del inspect['ID'] del inspect['ID']
return inspect return inspect
class DockerManager:
def get_split_image_tag(image):
# If image contains a host or org name, omit that from our check
if '/' in image:
registry, resource = image.rsplit('/', 1)
else:
registry, resource = None, image
# now we can determine if image has a tag
if ':' in resource:
resource, tag = resource.split(':', 1)
if registry:
resource = '/'.join((registry, resource))
else:
tag = "latest"
resource = image
return resource, tag
def get_docker_py_versioninfo():
if hasattr(docker, '__version__'):
# a '__version__' attribute was added to the module but not until
# after 0.3.0 was pushed to pypi. If it's there, use it.
version = []
for part in docker.__version__.split('.'):
try:
version.append(int(part))
except ValueError:
for idx, char in enumerate(part):
if not char.isdigit():
nondigit = part[idx:]
digit = part[:idx]
if digit:
version.append(int(digit))
if nondigit:
version.append(nondigit)
elif hasattr(docker.Client, '_get_raw_response_socket'):
# HACK: if '__version__' isn't there, we check for the existence of
# `_get_raw_response_socket` in the docker.Client class, which was
# added in 0.3.0
version = (0, 3, 0)
else:
# This is untrue but this module does not function with a version less
# than 0.3.0 so it's okay to lie here.
version = (0,)
return tuple(version)
def check_dependencies(module):
"""
Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a
helpful error message if it isn't.
"""
if not HAS_DOCKER_PY:
module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.")
else:
versioninfo = get_docker_py_versioninfo()
if versioninfo < (0, 3, 0):
module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.")
class DockerManager(object):
counters = {'created':0, 'started':0, 'stopped':0, 'killed':0, 'removed':0, 'restarted':0, 'pull':0} counters = {'created':0, 'started':0, 'stopped':0, 'killed':0, 'removed':0, 'restarted':0, 'pull':0}
_capabilities = set()
# Map optional parameters to minimum (docker-py version, server APIVersion)
# docker-py version is a tuple of ints because we have to compare them
# server APIVersion is passed to a docker-py function that takes strings
_cap_ver_req = {
'dns': ((0, 3, 0), '1.10'),
'volumes_from': ((0, 3, 0), '1.10'),
'restart_policy': ((0, 5, 0), '1.14'),
# Clientside only
'insecure_registry': ((0, 5, 0), '0.0')
}
def __init__(self, module): def __init__(self, module):
self.module = module self.module = module
@ -424,8 +524,50 @@ class DockerManager:
# connect to docker server # connect to docker server
docker_url = urlparse(module.params.get('docker_url')) docker_url = urlparse(module.params.get('docker_url'))
docker_api_version = module.params.get('docker_api_version') docker_api_version = module.params.get('docker_api_version')
if not docker_api_version:
docker_api_version=docker.client.DEFAULT_DOCKER_API_VERSION
self.client = docker.Client(base_url=docker_url.geturl(), version=docker_api_version) self.client = docker.Client(base_url=docker_url.geturl(), version=docker_api_version)
self.docker_py_versioninfo = get_docker_py_versioninfo()
def _check_capabilties(self):
"""
Create a list of available capabilities
"""
api_version = self.client.version()['ApiVersion']
for cap, req_vers in self._cap_ver_req.items():
if (self.docker_py_versioninfo >= req_vers[0] and
docker.utils.compare_version(req_vers[1], api_version) >= 0):
self._capabilities.add(cap)
def ensure_capability(self, capability, fail=True):
"""
Some of the functionality this ansible module implements are only
available in newer versions of docker. Ensure that the capability
is available here.
If fail is set to False then return True or False depending on whether
we have the capability. Otherwise, simply fail and exit the module if
we lack the capability.
"""
if not self._capabilities:
self._check_capabilties()
if capability in self._capabilities:
return True
if not fail:
return False
api_version = self.client.version()['ApiVersion']
self.module.fail_json(msg='Specifying the `%s` parameter requires'
' docker-py: %s, docker server apiversion %s; found'
' docker-py: %s, server: %s' % (
capability,
'.'.join(self._cap_ver_req[capability][0]),
self._cap_ver_req[capability][1],
'.'.join(self.docker_py_versioninfo),
api_version))
def get_links(self, links): def get_links(self, links):
""" """
@ -505,24 +647,6 @@ class DockerManager:
return binds return binds
def get_split_image_tag(self, image):
# If image contains a host or org name, omit that from our check
if '/' in image:
registry, resource = image.rsplit('/', 1)
else:
registry, resource = None, image
# now we can determine if image has a tag
if ':' in resource:
resource, tag = resource.split(':', 1)
if registry:
resource = '/'.join((registry, resource))
else:
tag = "latest"
resource = image
return resource, tag
def get_summary_counters_msg(self): def get_summary_counters_msg(self):
msg = "" msg = ""
for k, v in self.counters.iteritems(): for k, v in self.counters.iteritems():
@ -562,10 +686,10 @@ class DockerManager:
# if we weren't given a tag with the image, we need to only compare on the image name, as that # if we weren't given a tag with the image, we need to only compare on the image name, as that
# docker will give us back the full image name including a tag in the container list if one exists. # docker will give us back the full image name including a tag in the container list if one exists.
image, tag = self.get_split_image_tag(image) image, tag = get_split_image_tag(image)
for i in self.client.containers(all=True): for i in self.client.containers(all=True):
running_image, running_tag = self.get_split_image_tag(i['Image']) running_image, running_tag = get_split_image_tag(i['Image'])
running_command = i['Command'].strip() running_command = i['Command'].strip()
name_matches = False name_matches = False
@ -604,11 +728,20 @@ class DockerManager:
'name': self.module.params.get('name'), 'name': self.module.params.get('name'),
'stdin_open': self.module.params.get('stdin_open'), 'stdin_open': self.module.params.get('stdin_open'),
'tty': self.module.params.get('tty'), 'tty': self.module.params.get('tty'),
'dns': self.module.params.get('dns'),
'volumes_from': self.module.params.get('volumes_from'),
} }
if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) < 0: if params['dns'] is not None:
params['dns'] = self.module.params.get('dns') self.ensure_capability('dns')
params['volumes_from'] = self.module.params.get('volumes_from')
if params['volumes_from'] is not None:
self.ensure_capability('volumes_from')
extra_params = {}
if self.module.params.get('insecure_registry'):
if self.ensure_capability('insecure_registry', fail=False):
extra_params['insecure_registry'] = self.module.params.get('insecure_registry')
def do_create(count, params): def do_create(count, params):
results = [] results = []
@ -623,7 +756,7 @@ class DockerManager:
containers = do_create(count, params) containers = do_create(count, params)
except: except:
resource = self.module.params.get('image') resource = self.module.params.get('image')
image, tag = self.get_split_image_tag(resource) image, tag = get_split_image_tag(resource)
if self.module.params.get('username'): if self.module.params.get('username'):
try: try:
self.client.login( self.client.login(
@ -635,7 +768,7 @@ class DockerManager:
except: except:
self.module.fail_json(msg="failed to login to the remote registry, check your username/password.") self.module.fail_json(msg="failed to login to the remote registry, check your username/password.")
try: try:
self.client.pull(image, tag=tag) self.client.pull(image, tag=tag, **extra_params)
except: except:
self.module.fail_json(msg="failed to pull the specified image: %s" % resource) self.module.fail_json(msg="failed to pull the specified image: %s" % resource)
self.increment_counter('pull') self.increment_counter('pull')
@ -653,9 +786,24 @@ class DockerManager:
'links': self.links, 'links': self.links,
'network_mode': self.module.params.get('net'), 'network_mode': self.module.params.get('net'),
} }
if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) >= 0 and hasattr(docker, '__version__') and docker.__version__ > '0.3.0':
params['dns'] = self.module.params.get('dns') optionals = {}
params['volumes_from'] = self.module.params.get('volumes_from') for optional_param in ('dns', 'volumes_from', 'restart_policy', 'restart_policy_retry'):
optionals[optional_param] = self.module.params.get(optional_param)
if optionals['dns'] is not None:
self.ensure_capability('dns')
params['dns'] = optionals['dns']
if optionals['volumes_from'] is not None:
self.ensure_capability('volumes_from')
params['volumes_from'] = optionals['volumes_from']
if optionals['restart_policy'] is not None:
self.ensure_capability('restart_policy')
params['restart_policy'] = { 'Name': optionals['restart_policy'] }
if params['restart_policy']['Name'] == 'on-failure':
params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry']
for i in containers: for i in containers:
self.client.start(i['Id'], **params) self.client.start(i['Id'], **params)
@ -684,31 +832,6 @@ class DockerManager:
self.increment_counter('restarted') self.increment_counter('restarted')
def check_dependencies(module):
"""
Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a
helpful error message if it isn't.
"""
if not HAS_DOCKER_PY:
module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.")
else:
HAS_NEW_ENOUGH_DOCKER_PY = False
if hasattr(docker, '__version__'):
# a '__version__' attribute was added to the module but not until
# after 0.3.0 was added pushed to pip. If it's there, use it.
if docker.__version__ >= '0.3.0':
HAS_NEW_ENOUGH_DOCKER_PY = True
else:
# HACK: if '__version__' isn't there, we check for the existence of
# `_get_raw_response_socket` in the docker.Client class, which was
# added in 0.3.0
if hasattr(docker.Client, '_get_raw_response_socket'):
HAS_NEW_ENOUGH_DOCKER_PY = True
if not HAS_NEW_ENOUGH_DOCKER_PY:
module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.")
def main(): def main():
module = AnsibleModule( module = AnsibleModule(
argument_spec = dict( argument_spec = dict(
@ -724,7 +847,7 @@ def main():
memory_limit = dict(default=0), memory_limit = dict(default=0),
memory_swap = dict(default=0), memory_swap = dict(default=0),
docker_url = dict(default='unix://var/run/docker.sock'), docker_url = dict(default='unix://var/run/docker.sock'),
docker_api_version = dict(default=docker.client.DEFAULT_DOCKER_API_VERSION), docker_api_version = dict(),
username = dict(default=None), username = dict(default=None),
password = dict(), password = dict(),
email = dict(), email = dict(),
@ -734,13 +857,16 @@ def main():
dns = dict(), dns = dict(),
detach = dict(default=True, type='bool'), detach = dict(default=True, type='bool'),
state = dict(default='running', choices=['absent', 'present', 'running', 'stopped', 'killed', 'restarted']), state = dict(default='running', choices=['absent', 'present', 'running', 'stopped', 'killed', 'restarted']),
restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']),
restart_policy_retry = dict(default=0, type='int'),
debug = dict(default=False, type='bool'), debug = dict(default=False, type='bool'),
privileged = dict(default=False, type='bool'), privileged = dict(default=False, type='bool'),
stdin_open = dict(default=False, type='bool'), stdin_open = dict(default=False, type='bool'),
tty = dict(default=False, type='bool'), tty = dict(default=False, type='bool'),
lxc_conf = dict(default=None, type='list'), lxc_conf = dict(default=None, type='list'),
name = dict(default=None), name = dict(default=None),
net = dict(default=None) net = dict(default=None),
insecure_registry = dict(default=False, type='bool'),
) )
) )
@ -851,4 +977,5 @@ def main():
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
main() if __name__ == '__main__':
main()

0
cloud/google/__init__.py Normal file
View file

View file

@ -319,11 +319,12 @@ def handle_create(module, gs, bucket, obj):
else: else:
module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket)) module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket))
if bucket and obj: if bucket and obj:
if bucket_check(module, gs, bucket):
if obj.endswith('/'): if obj.endswith('/'):
dirobj = obj dirobj = obj
else: else:
dirobj = obj + "/" dirobj = obj + "/"
if bucket_check(module, gs, bucket):
if key_check(module, gs, bucket, dirobj): if key_check(module, gs, bucket, dirobj):
module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False) module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
else: else:

0
cloud/gce.py → cloud/google/gce.py Executable file → Normal file
View file

View file

@ -110,6 +110,7 @@ EXAMPLES = '''
- local_action: - local_action:
module: gce_net module: gce_net
name: privatenet name: privatenet
fwname: all-web-webproxy
allowed: tcp:80,8080 allowed: tcp:80,8080
src_tags: ["web", "proxy"] src_tags: ["web", "proxy"]
@ -155,7 +156,7 @@ def main():
ipv4_range = dict(), ipv4_range = dict(),
fwname = dict(), fwname = dict(),
name = dict(), name = dict(),
src_range = dict(), src_range = dict(type='list'),
src_tags = dict(type='list'), src_tags = dict(type='list'),
state = dict(default='present'), state = dict(default='present'),
service_account_email = dict(), service_account_email = dict(),

0
cloud/linode/__init__.py Normal file
View file

View file

View file

@ -254,7 +254,7 @@ def main():
else: else:
_glance_delete_image(module, module.params, client) _glance_delete_image(module, module.params, client)
# this is magic, see lib/ansible/module.params['common.py # this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
from ansible.module_utils.openstack import * from ansible.module_utils.openstack import *
main() main()

View file

@ -291,6 +291,9 @@ def main():
argument_spec.update(dict( argument_spec.update(dict(
tenant_description=dict(required=False), tenant_description=dict(required=False),
email=dict(required=False), email=dict(required=False),
user=dict(required=False),
tenant=dict(required=False),
password=dict(required=False),
role=dict(required=False), role=dict(required=False),
state=dict(default='present', choices=['present', 'absent']), state=dict(default='present', choices=['present', 'absent']),
endpoint=dict(required=False, endpoint=dict(required=False,

View file

@ -121,10 +121,10 @@ options:
description: description:
- Should a floating ip be auto created and assigned - Should a floating ip be auto created and assigned
required: false required: false
default: 'yes' default: 'no'
version_added: "1.8" version_added: "1.8"
floating_ips: floating_ips:
decription: description:
- list of valid floating IPs that pre-exist to assign to this node - list of valid floating IPs that pre-exist to assign to this node
required: false required: false
default: None default: None

View file

View file

@ -64,7 +64,10 @@ options:
exact_count: exact_count:
description: description:
- Explicitly ensure an exact count of instances, used with - Explicitly ensure an exact count of instances, used with
state=active/present state=active/present. If specified as C(yes) and I(count) is less than
the servers matched, servers will be deleted to match the count. If
the number of matched servers is fewer than specified in I(count)
additional servers will be added.
default: no default: no
choices: choices:
- "yes" - "yes"
@ -150,6 +153,12 @@ options:
- how long before wait gives up, in seconds - how long before wait gives up, in seconds
default: 300 default: 300
author: Jesse Keating, Matt Martz author: Jesse Keating, Matt Martz
notes:
- I(exact_count) can be "destructive" if the number of running servers in
the I(group) is larger than that specified in I(count). In such a case, the
I(state) is effectively set to C(absent) and the extra servers are deleted.
In the case of deletion, the returned data structure will have C(action)
set to C(delete), and the oldest servers in the group will be deleted.
extends_documentation_fragment: rackspace.openstack extends_documentation_fragment: rackspace.openstack
''' '''
@ -441,10 +450,12 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
if group is None: if group is None:
module.fail_json(msg='"group" must be provided when using ' module.fail_json(msg='"group" must be provided when using '
'"exact_count"') '"exact_count"')
else:
if auto_increment: if auto_increment:
numbers = set() numbers = set()
# See if the name is a printf like string, if not append
# %d to the end
try: try:
name % 0 name % 0
except TypeError, e: except TypeError, e:
@ -453,8 +464,12 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
else: else:
module.fail_json(msg=e.message) module.fail_json(msg=e.message)
# regex pattern to match printf formatting
pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
for server in cs.servers.list(): for server in cs.servers.list():
# Ignore DELETED servers
if server.status == 'DELETED':
continue
if server.metadata.get('group') == group: if server.metadata.get('group') == group:
servers.append(server) servers.append(server)
match = re.search(pattern, server.name) match = re.search(pattern, server.name)
@ -465,10 +480,15 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
number_range = xrange(count_offset, count_offset + count) number_range = xrange(count_offset, count_offset + count)
available_numbers = list(set(number_range) available_numbers = list(set(number_range)
.difference(numbers)) .difference(numbers))
else: else: # Not auto incrementing
for server in cs.servers.list(): for server in cs.servers.list():
# Ignore DELETED servers
if server.status == 'DELETED':
continue
if server.metadata.get('group') == group: if server.metadata.get('group') == group:
servers.append(server) servers.append(server)
# available_numbers not needed here, we inspect auto_increment
# again later
# If state was absent but the count was changed, # If state was absent but the count was changed,
# assume we only wanted to remove that number of instances # assume we only wanted to remove that number of instances
@ -480,6 +500,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
count = diff count = diff
if len(servers) > count: if len(servers) > count:
# We have more servers than we need, set state='absent'
# and delete the extras, this should delete the oldest
state = 'absent' state = 'absent'
kept = servers[:count] kept = servers[:count]
del servers[:count] del servers[:count]
@ -489,15 +511,21 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
delete(module, instance_ids=instance_ids, wait=wait, delete(module, instance_ids=instance_ids, wait=wait,
wait_timeout=wait_timeout, kept=kept) wait_timeout=wait_timeout, kept=kept)
elif len(servers) < count: elif len(servers) < count:
# we have fewer servers than we need
if auto_increment: if auto_increment:
# auto incrementing server numbers
names = [] names = []
name_slice = count - len(servers) name_slice = count - len(servers)
numbers_to_use = available_numbers[:name_slice] numbers_to_use = available_numbers[:name_slice]
for number in numbers_to_use: for number in numbers_to_use:
names.append(name % number) names.append(name % number)
else: else:
# We are not auto incrementing server numbers,
# create a list of 'name' that matches how many we need
names = [name] * (count - len(servers)) names = [name] * (count - len(servers))
else: else:
# we have the right number of servers, just return info
# about all of the matched servers
instances = [] instances = []
instance_ids = [] instance_ids = []
for server in servers: for server in servers:
@ -509,11 +537,15 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
instance_ids={'instances': instance_ids, instance_ids={'instances': instance_ids,
'success': [], 'error': [], 'success': [], 'error': [],
'timeout': []}) 'timeout': []})
else: else: # not called with exact_count=True
if group is not None: if group is not None:
if auto_increment: if auto_increment:
# we are auto incrementing server numbers, but not with
# exact_count
numbers = set() numbers = set()
# See if the name is a printf like string, if not append
# %d to the end
try: try:
name % 0 name % 0
except TypeError, e: except TypeError, e:
@ -522,8 +554,12 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
else: else:
module.fail_json(msg=e.message) module.fail_json(msg=e.message)
# regex pattern to match printf formatting
pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
for server in cs.servers.list(): for server in cs.servers.list():
# Ignore DELETED servers
if server.status == 'DELETED':
continue
if server.metadata.get('group') == group: if server.metadata.get('group') == group:
servers.append(server) servers.append(server)
match = re.search(pattern, server.name) match = re.search(pattern, server.name)
@ -540,8 +576,11 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
for number in numbers_to_use: for number in numbers_to_use:
names.append(name % number) names.append(name % number)
else: else:
# Not auto incrementing
names = [name] * count names = [name] * count
else: else:
# No group was specified, and not using exact_count
# Perform more simplistic matching
search_opts = { search_opts = {
'name': '^%s$' % name, 'name': '^%s$' % name,
'image': image, 'image': image,
@ -549,11 +588,18 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
} }
servers = [] servers = []
for server in cs.servers.list(search_opts=search_opts): for server in cs.servers.list(search_opts=search_opts):
# Ignore DELETED servers
if server.status == 'DELETED':
continue
# Ignore servers with non matching metadata
if server.metadata != meta: if server.metadata != meta:
continue continue
servers.append(server) servers.append(server)
if len(servers) >= count: if len(servers) >= count:
# We have more servers than were requested, don't do
# anything. Not running with exact_count=True, so we assume
# more is OK
instances = [] instances = []
for server in servers: for server in servers:
instances.append(rax_to_dict(server, 'server')) instances.append(rax_to_dict(server, 'server'))
@ -566,6 +612,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
'success': [], 'error': [], 'success': [], 'error': [],
'timeout': []}) 'timeout': []})
# We need more servers to reach out target, create names for
# them, we aren't performing auto_increment here
names = [name] * (count - len(servers)) names = [name] * (count - len(servers))
create(module, names=names, flavor=flavor, image=image, create(module, names=names, flavor=flavor, image=image,
@ -577,6 +625,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
elif state == 'absent': elif state == 'absent':
if instance_ids is None: if instance_ids is None:
# We weren't given an explicit list of server IDs to delete
# Let's match instead
for arg, value in dict(name=name, flavor=flavor, for arg, value in dict(name=name, flavor=flavor,
image=image).iteritems(): image=image).iteritems():
if not value: if not value:
@ -588,10 +638,15 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
'flavor': flavor 'flavor': flavor
} }
for server in cs.servers.list(search_opts=search_opts): for server in cs.servers.list(search_opts=search_opts):
# Ignore DELETED servers
if server.status == 'DELETED':
continue
# Ignore servers with non matching metadata
if meta != server.metadata: if meta != server.metadata:
continue continue
servers.append(server) servers.append(server)
# Build a list of server IDs to delete
instance_ids = [] instance_ids = []
for server in servers: for server in servers:
if len(instance_ids) < count: if len(instance_ids) < count:
@ -600,6 +655,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
break break
if not instance_ids: if not instance_ids:
# No server IDs were matched for deletion, or no IDs were
# explicitly provided, just exit and don't do anything
module.exit_json(changed=False, action=None, instances=[], module.exit_json(changed=False, action=None, instances=[],
success=[], error=[], timeout=[], success=[], error=[], timeout=[],
instance_ids={'instances': [], instance_ids={'instances': [],

View file

@ -108,10 +108,6 @@ except ImportError:
def cloud_block_storage(module, state, name, description, meta, size, def cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout): snapshot_id, volume_type, wait, wait_timeout):
for arg in (state, name, size, volume_type):
if not arg:
module.fail_json(msg='%s is required for rax_cbs' % arg)
if size < 100: if size < 100:
module.fail_json(msg='"size" must be greater than or equal to 100') module.fail_json(msg='"size" must be greater than or equal to 100')
@ -145,10 +141,7 @@ def cloud_block_storage(module, state, name, description, meta, size,
attempts=attempts) attempts=attempts)
volume.get() volume.get()
for key, value in vars(volume).iteritems(): instance = rax_to_dict(volume)
if (isinstance(value, NON_CALLABLES) and
not key.startswith('_')):
instance[key] = value
result = dict(changed=changed, volume=instance) result = dict(changed=changed, volume=instance)
@ -164,6 +157,7 @@ def cloud_block_storage(module, state, name, description, meta, size,
elif state == 'absent': elif state == 'absent':
if volume: if volume:
instance = rax_to_dict(volume)
try: try:
volume.delete() volume.delete()
changed = True changed = True

View file

@ -90,11 +90,6 @@ except ImportError:
def cloud_block_storage_attachments(module, state, volume, server, device, def cloud_block_storage_attachments(module, state, volume, server, device,
wait, wait_timeout): wait, wait_timeout):
for arg in (state, volume, server, device):
if not arg:
module.fail_json(msg='%s is required for rax_cbs_attachments' %
arg)
cbs = pyrax.cloud_blockstorage cbs = pyrax.cloud_blockstorage
cs = pyrax.cloudservers cs = pyrax.cloudservers
@ -133,7 +128,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device,
not key.startswith('_')): not key.startswith('_')):
instance[key] = value instance[key] = value
result = dict(changed=changed, volume=instance) result = dict(changed=changed)
if volume.status == 'error': if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id result['msg'] = '%s failed to build' % volume.id
@ -142,6 +137,9 @@ def cloud_block_storage_attachments(module, state, volume, server, device,
pyrax.utils.wait_until(volume, 'status', 'in-use', pyrax.utils.wait_until(volume, 'status', 'in-use',
interval=5, attempts=attempts) interval=5, attempts=attempts)
volume.get()
result['volume'] = rax_to_dict(volume)
if 'msg' in result: if 'msg' in result:
module.fail_json(**result) module.fail_json(**result)
else: else:
@ -167,12 +165,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device,
elif volume.attachments: elif volume.attachments:
module.fail_json(msg='Volume is attached to another server') module.fail_json(msg='Volume is attached to another server')
for key, value in vars(volume).iteritems(): result = dict(changed=changed, volume=rax_to_dict(volume))
if (isinstance(value, NON_CALLABLES) and
not key.startswith('_')):
instance[key] = value
result = dict(changed=changed, volume=instance)
if volume.status == 'error': if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id result['msg'] = '%s failed to build' % volume.id

View file

@ -140,10 +140,6 @@ except ImportError:
def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
vip_type, timeout, wait, wait_timeout, vip_id): vip_type, timeout, wait, wait_timeout, vip_id):
for arg in (state, name, port, protocol, vip_type):
if not arg:
module.fail_json(msg='%s is required for rax_clb' % arg)
if int(timeout) < 30: if int(timeout) < 30:
module.fail_json(msg='"timeout" must be greater than or equal to 30') module.fail_json(msg='"timeout" must be greater than or equal to 30')
@ -257,7 +253,7 @@ def main():
algorithm=dict(choices=CLB_ALGORITHMS, algorithm=dict(choices=CLB_ALGORITHMS,
default='LEAST_CONNECTIONS'), default='LEAST_CONNECTIONS'),
meta=dict(type='dict', default={}), meta=dict(type='dict', default={}),
name=dict(), name=dict(required=True),
port=dict(type='int', default=80), port=dict(type='int', default=80),
protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'), protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'),
state=dict(default='present', choices=['present', 'absent']), state=dict(default='present', choices=['present', 'absent']),

View file

@ -150,21 +150,6 @@ def _get_node(lb, node_id=None, address=None, port=None):
return None return None
def _is_primary(node):
"""Return True if node is primary and enabled"""
return (node.type.lower() == 'primary' and
node.condition.lower() == 'enabled')
def _get_primary_nodes(lb):
"""Return a list of primary and enabled nodes"""
nodes = []
for node in lb.nodes:
if _is_primary(node):
nodes.append(node)
return nodes
def main(): def main():
argument_spec = rax_argument_spec() argument_spec = rax_argument_spec()
argument_spec.update( argument_spec.update(
@ -230,13 +215,6 @@ def main():
if state == 'absent': if state == 'absent':
if not node: # Removing a non-existent node if not node: # Removing a non-existent node
module.exit_json(changed=False, state=state) module.exit_json(changed=False, state=state)
# The API detects this as well but currently pyrax does not return a
# meaningful error message
if _is_primary(node) and len(_get_primary_nodes(lb)) == 1:
module.fail_json(
msg='At least one primary node has to be enabled')
try: try:
lb.delete_node(node) lb.delete_node(node)
result = {} result = {}
@ -299,5 +277,5 @@ def main():
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
from ansible.module_utils.rax import * from ansible.module_utils.rax import *
### invoke the module # invoke the module
main() main()

View file

@ -55,10 +55,6 @@ except ImportError:
def cloud_identity(module, state, identity): def cloud_identity(module, state, identity):
for arg in (state, identity):
if not arg:
module.fail_json(msg='%s is required for rax_identity' % arg)
instance = dict( instance = dict(
authenticated=identity.authenticated, authenticated=identity.authenticated,
credentials=identity._creds_file credentials=identity._creds_file
@ -79,7 +75,7 @@ def main():
argument_spec = rax_argument_spec() argument_spec = rax_argument_spec()
argument_spec.update( argument_spec.update(
dict( dict(
state=dict(default='present', choices=['present', 'absent']) state=dict(default='present', choices=['present'])
) )
) )
@ -95,7 +91,7 @@ def main():
setup_rax_module(module, pyrax) setup_rax_module(module, pyrax)
if pyrax.identity is None: if not pyrax.identity:
module.fail_json(msg='Failed to instantiate client. This ' module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an ' 'typically indicates an invalid region or an '
'incorrectly capitalized region name.') 'incorrectly capitalized region name.')
@ -106,5 +102,5 @@ def main():
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
from ansible.module_utils.rax import * from ansible.module_utils.rax import *
### invoke the module # invoke the module
main() main()

View file

@ -104,7 +104,7 @@ def rax_keypair(module, name, public_key, state):
keypair = {} keypair = {}
if state == 'present': if state == 'present':
if os.path.isfile(public_key): if public_key and os.path.isfile(public_key):
try: try:
f = open(public_key) f = open(public_key)
public_key = f.read() public_key = f.read()
@ -143,7 +143,7 @@ def main():
argument_spec = rax_argument_spec() argument_spec = rax_argument_spec()
argument_spec.update( argument_spec.update(
dict( dict(
name=dict(), name=dict(required=True),
public_key=dict(), public_key=dict(),
state=dict(default='present', choices=['absent', 'present']), state=dict(default='present', choices=['absent', 'present']),
) )

View file

@ -65,10 +65,6 @@ except ImportError:
def cloud_network(module, state, label, cidr): def cloud_network(module, state, label, cidr):
for arg in (state, label, cidr):
if not arg:
module.fail_json(msg='%s is required for cloud_networks' % arg)
changed = False changed = False
network = None network = None
networks = [] networks = []
@ -79,6 +75,9 @@ def cloud_network(module, state, label, cidr):
'incorrectly capitalized region name.') 'incorrectly capitalized region name.')
if state == 'present': if state == 'present':
if not cidr:
module.fail_json(msg='missing required arguments: cidr')
try: try:
network = pyrax.cloud_networks.find_network_by_label(label) network = pyrax.cloud_networks.find_network_by_label(label)
except pyrax.exceptions.NetworkNotFound: except pyrax.exceptions.NetworkNotFound:
@ -115,7 +114,7 @@ def main():
dict( dict(
state=dict(default='present', state=dict(default='present',
choices=['present', 'absent']), choices=['present', 'absent']),
label=dict(), label=dict(required=True),
cidr=dict() cidr=dict()
) )
) )

View file

@ -24,6 +24,14 @@ description:
- Manipulate Rackspace Cloud Autoscale Groups - Manipulate Rackspace Cloud Autoscale Groups
version_added: 1.7 version_added: 1.7
options: options:
config_drive:
description:
- Attach read-only configuration drive to server as label config-2
default: no
choices:
- "yes"
- "no"
version_added: 1.8
cooldown: cooldown:
description: description:
- The period of time, in seconds, that must pass before any scaling can - The period of time, in seconds, that must pass before any scaling can
@ -92,6 +100,11 @@ options:
- present - present
- absent - absent
default: present default: present
user_data:
description:
- Data to be uploaded to the servers config drive. This option implies
I(config_drive). Can be a file path or a string
version_added: 1.8
author: Matt Martz author: Matt Martz
extends_documentation_fragment: rackspace extends_documentation_fragment: rackspace
''' '''
@ -118,6 +131,8 @@ EXAMPLES = '''
register: asg register: asg
''' '''
import base64
try: try:
import pyrax import pyrax
HAS_PYRAX = True HAS_PYRAX = True
@ -128,17 +143,27 @@ except ImportError:
def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
image=None, key_name=None, loadbalancers=[], meta={}, image=None, key_name=None, loadbalancers=[], meta={},
min_entities=0, max_entities=0, name=None, networks=[], min_entities=0, max_entities=0, name=None, networks=[],
server_name=None, state='present'): server_name=None, state='present', user_data=None,
config_drive=False):
changed = False changed = False
au = pyrax.autoscale au = pyrax.autoscale
cnw = pyrax.cloud_networks if not au:
cs = pyrax.cloudservers
if not au or not cnw or not cs:
module.fail_json(msg='Failed to instantiate clients. This ' module.fail_json(msg='Failed to instantiate clients. This '
'typically indicates an invalid region or an ' 'typically indicates an invalid region or an '
'incorrectly capitalized region name.') 'incorrectly capitalized region name.')
if user_data:
config_drive = True
if user_data and os.path.isfile(user_data):
try:
f = open(user_data)
user_data = f.read()
f.close()
except Exception, e:
module.fail_json(msg='Failed to load %s' % user_data)
if state == 'present': if state == 'present':
# Normalize and ensure all metadata values are strings # Normalize and ensure all metadata values are strings
if meta: if meta:
@ -184,8 +209,16 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
lbs = [] lbs = []
if loadbalancers: if loadbalancers:
for lb in loadbalancers: for lb in loadbalancers:
lb_id = lb.get('id') try:
port = lb.get('port') lb_id = int(lb.get('id'))
except (ValueError, TypeError):
module.fail_json(msg='Load balancer ID is not an integer: '
'%s' % lb.get('id'))
try:
port = int(lb.get('port'))
except (ValueError, TypeError):
module.fail_json(msg='Load balancer port is not an '
'integer: %s' % lb.get('port'))
if not lb_id or not port: if not lb_id or not port:
continue continue
lbs.append((lb_id, port)) lbs.append((lb_id, port))
@ -202,9 +235,10 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
launch_config_type='launch_server', launch_config_type='launch_server',
server_name=server_name, image=image, server_name=server_name, image=image,
flavor=flavor, disk_config=disk_config, flavor=flavor, disk_config=disk_config,
metadata=meta, personality=files, metadata=meta, personality=personality,
networks=nics, load_balancers=lbs, networks=nics, load_balancers=lbs,
key_name=key_name) key_name=key_name, config_drive=config_drive,
user_data=user_data)
changed = True changed = True
except Exception, e: except Exception, e:
module.fail_json(msg='%s' % e.message) module.fail_json(msg='%s' % e.message)
@ -237,14 +271,23 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
if flavor != lc.get('flavor'): if flavor != lc.get('flavor'):
lc_args['flavor'] = flavor lc_args['flavor'] = flavor
if disk_config != lc.get('disk_config'): disk_config = disk_config or 'AUTO'
if ((disk_config or lc.get('disk_config')) and
disk_config != lc.get('disk_config')):
lc_args['disk_config'] = disk_config lc_args['disk_config'] = disk_config
if meta != lc.get('metadata'): if (meta or lc.get('meta')) and meta != lc.get('metadata'):
lc_args['metadata'] = meta lc_args['metadata'] = meta
if files != lc.get('personality'): test_personality = []
lc_args['personality'] = files for p in personality:
test_personality.append({
'path': p['path'],
'contents': base64.b64encode(p['contents'])
})
if ((test_personality or lc.get('personality')) and
test_personality != lc.get('personality')):
lc_args['personality'] = personality
if nics != lc.get('networks'): if nics != lc.get('networks'):
lc_args['networks'] = nics lc_args['networks'] = nics
@ -256,6 +299,13 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
if key_name != lc.get('key_name'): if key_name != lc.get('key_name'):
lc_args['key_name'] = key_name lc_args['key_name'] = key_name
if config_drive != lc.get('config_drive'):
lc_args['config_drive'] = config_drive
if (user_data and
base64.b64encode(user_data) != lc.get('user_data')):
lc_args['user_data'] = user_data
if lc_args: if lc_args:
# Work around for https://github.com/rackspace/pyrax/pull/389 # Work around for https://github.com/rackspace/pyrax/pull/389
if 'flavor' not in lc_args: if 'flavor' not in lc_args:
@ -284,9 +334,10 @@ def main():
argument_spec = rax_argument_spec() argument_spec = rax_argument_spec()
argument_spec.update( argument_spec.update(
dict( dict(
config_drive=dict(default=False, type='bool'),
cooldown=dict(type='int', default=300), cooldown=dict(type='int', default=300),
disk_config=dict(choices=['auto', 'manual']), disk_config=dict(choices=['auto', 'manual']),
files=dict(type='list', default=[]), files=dict(type='dict', default={}),
flavor=dict(required=True), flavor=dict(required=True),
image=dict(required=True), image=dict(required=True),
key_name=dict(), key_name=dict(),
@ -298,6 +349,7 @@ def main():
networks=dict(type='list', default=['public', 'private']), networks=dict(type='list', default=['public', 'private']),
server_name=dict(required=True), server_name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']), state=dict(default='present', choices=['present', 'absent']),
user_data=dict(no_log=True),
) )
) )
@ -309,6 +361,7 @@ def main():
if not HAS_PYRAX: if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module') module.fail_json(msg='pyrax is required for this module')
config_drive = module.params.get('config_drive')
cooldown = module.params.get('cooldown') cooldown = module.params.get('cooldown')
disk_config = module.params.get('disk_config') disk_config = module.params.get('disk_config')
if disk_config: if disk_config:
@ -325,6 +378,7 @@ def main():
networks = module.params.get('networks') networks = module.params.get('networks')
server_name = module.params.get('server_name') server_name = module.params.get('server_name')
state = module.params.get('state') state = module.params.get('state')
user_data = module.params.get('user_data')
if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000: if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000:
module.fail_json(msg='min_entities and max_entities must be an ' module.fail_json(msg='min_entities and max_entities must be an '
@ -340,7 +394,7 @@ def main():
key_name=key_name, loadbalancers=loadbalancers, key_name=key_name, loadbalancers=loadbalancers,
min_entities=min_entities, max_entities=max_entities, min_entities=min_entities, max_entities=max_entities,
name=name, networks=networks, server_name=server_name, name=name, networks=networks, server_name=server_name,
state=state) state=state, config_drive=config_drive, user_data=user_data)
# import module snippets # import module snippets

0
cloud/vmware/__init__.py Normal file
View file

View file

@ -38,7 +38,7 @@ options:
description: description:
- The virtual server name you wish to manage. - The virtual server name you wish to manage.
required: true required: true
user: username:
description: description:
- Username to connect to vcenter as. - Username to connect to vcenter as.
required: true required: true
@ -67,7 +67,18 @@ options:
description: description:
- Indicate desired state of the vm. - Indicate desired state of the vm.
default: present default: present
choices: ['present', 'powered_on', 'absent', 'powered_off', 'restarted', 'reconfigured'] choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured']
from_template:
version_added: "1.9"
description:
- Specifies if the VM should be deployed from a template (cannot be ran with state)
default: no
choices: ['yes', 'no']
template_src:
version_added: "1.9"
description:
- Name of the source template to deploy from
default: None
vm_disk: vm_disk:
description: description:
- A key, value list of disks and their sizes and which datastore to keep it in. - A key, value list of disks and their sizes and which datastore to keep it in.
@ -181,6 +192,18 @@ EXAMPLES = '''
datacenter: MyDatacenter datacenter: MyDatacenter
hostname: esx001.mydomain.local hostname: esx001.mydomain.local
# Deploy a guest from a template
# No reconfiguration of the destination guest is done at this stage, a reconfigure would be needed to adjust memory/cpu etc..
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
from_template: yes
template_src: centosTemplate
cluster: MainCluster
resource_pool: "/Resources"
# Task to gather facts from a vSphere cluster only if the system is a VMWare guest # Task to gather facts from a vSphere cluster only if the system is a VMWare guest
- vsphere_guest: - vsphere_guest:
@ -192,12 +215,14 @@ EXAMPLES = '''
# Typical output of a vsphere_facts run on a guest # Typical output of a vsphere_facts run on a guest
# If vmware tools is not installed, ipadresses with return None
- hw_eth0: - hw_eth0:
- addresstype: "assigned" - addresstype: "assigned"
label: "Network adapter 1" label: "Network adapter 1"
macaddress: "00:22:33:33:44:55" macaddress: "00:22:33:33:44:55"
macaddress_dash: "00-22-33-33-44-55" macaddress_dash: "00-22-33-33-44-55"
ipaddresses: ['192.0.2.100', '2001:DB8:56ff:feac:4d8a']
summary: "VM Network" summary: "VM Network"
hw_guest_full_name: "newvm001" hw_guest_full_name: "newvm001"
hw_guest_id: "rhel6_64Guest" hw_guest_id: "rhel6_64Guest"
@ -207,7 +232,7 @@ EXAMPLES = '''
hw_product_uuid: "ef50bac8-2845-40ff-81d9-675315501dac" hw_product_uuid: "ef50bac8-2845-40ff-81d9-675315501dac"
# Remove a vm from vSphere # Remove a vm from vSphere
# The VM must be powered_off of you need to use force to force a shutdown # The VM must be powered_off or you need to use force to force a shutdown
- vsphere_guest: - vsphere_guest:
vcenter_hostname: vcenter.mydomain.local vcenter_hostname: vcenter.mydomain.local
@ -488,6 +513,49 @@ def vmdisk_id(vm, current_datastore_name):
return id_list return id_list
def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name):
vmTemplate = vsphere_client.get_vm_by_name(template_src)
vmTarget = None
try:
cluster = [k for k,
v in vsphere_client.get_clusters().items() if v == cluster_name][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Cluster named: %s" %
cluster_name)
try:
rpmor = [k for k, v in vsphere_client.get_resource_pools(
from_mor=cluster).items()
if v == resource_pool][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Resource Pool named: %s" %
resource_pool)
try:
vmTarget = vsphere_client.get_vm_by_name(guest)
except Exception:
pass
if not vmTemplate.properties.config.template:
module.fail_json(
msg="Target %s is not a registered template" % template_src
)
try:
if vmTarget:
changed = False
else:
vmTemplate.clone(guest, resourcepool=rpmor)
changed = True
vsphere_client.disconnect()
module.exit_json(changed=changed)
except Exception as e:
module.fail_json(
msg="Could not clone selected machine: %s" % e
)
def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force): def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force):
spec = None spec = None
changed = False changed = False
@ -618,6 +686,15 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
hfmor = dcprops.hostFolder._obj hfmor = dcprops.hostFolder._obj
# virtualmachineFolder managed object reference # virtualmachineFolder managed object reference
if vm_extra_config.get('folder'):
if vm_extra_config['folder'] not in vsphere_client._get_managed_objects(MORTypes.Folder).values():
vsphere_client.disconnect()
module.fail_json(msg="Cannot find folder named: %s" % vm_extra_config['folder'])
for mor, name in vsphere_client._get_managed_objects(MORTypes.Folder).iteritems():
if name == vm_extra_config['folder']:
vmfmor = mor
else:
vmfmor = dcprops.vmFolder._obj vmfmor = dcprops.vmFolder._obj
# networkFolder managed object reference # networkFolder managed object reference
@ -936,6 +1013,11 @@ def gather_facts(vm):
'hw_processor_count': vm.properties.config.hardware.numCPU, 'hw_processor_count': vm.properties.config.hardware.numCPU,
'hw_memtotal_mb': vm.properties.config.hardware.memoryMB, 'hw_memtotal_mb': vm.properties.config.hardware.memoryMB,
} }
netInfo = vm.get_property('net')
netDict = {}
if netInfo:
for net in netInfo:
netDict[net['mac_address']] = net['ip_addresses']
ifidx = 0 ifidx = 0
for entry in vm.properties.config.hardware.device: for entry in vm.properties.config.hardware.device:
@ -948,6 +1030,7 @@ def gather_facts(vm):
'addresstype': entry.addressType, 'addresstype': entry.addressType,
'label': entry.deviceInfo.label, 'label': entry.deviceInfo.label,
'macaddress': entry.macAddress, 'macaddress': entry.macAddress,
'ipaddresses': netDict.get(entry.macAddress, None),
'macaddress_dash': entry.macAddress.replace(':', '-'), 'macaddress_dash': entry.macAddress.replace(':', '-'),
'summary': entry.deviceInfo.summary, 'summary': entry.deviceInfo.summary,
} }
@ -1066,6 +1149,8 @@ def main():
], ],
default='present'), default='present'),
vmware_guest_facts=dict(required=False, choices=BOOLEANS), vmware_guest_facts=dict(required=False, choices=BOOLEANS),
from_template=dict(required=False, choices=BOOLEANS),
template_src=dict(required=False, type='str'),
guest=dict(required=True, type='str'), guest=dict(required=True, type='str'),
vm_disk=dict(required=False, type='dict', default={}), vm_disk=dict(required=False, type='dict', default={}),
vm_nic=dict(required=False, type='dict', default={}), vm_nic=dict(required=False, type='dict', default={}),
@ -1080,7 +1165,7 @@ def main():
), ),
supports_check_mode=False, supports_check_mode=False,
mutually_exclusive=[['state', 'vmware_guest_facts']], mutually_exclusive=[['state', 'vmware_guest_facts'],['state', 'from_template']],
required_together=[ required_together=[
['state', 'force'], ['state', 'force'],
[ [
@ -1090,7 +1175,8 @@ def main():
'vm_hardware', 'vm_hardware',
'esxi' 'esxi'
], ],
['resource_pool', 'cluster'] ['resource_pool', 'cluster'],
['from_template', 'resource_pool', 'template_src']
], ],
) )
@ -1112,6 +1198,8 @@ def main():
esxi = module.params['esxi'] esxi = module.params['esxi']
resource_pool = module.params['resource_pool'] resource_pool = module.params['resource_pool']
cluster = module.params['cluster'] cluster = module.params['cluster']
template_src = module.params['template_src']
from_template = module.params['from_template']
# CONNECT TO THE SERVER # CONNECT TO THE SERVER
viserver = VIServer() viserver = VIServer()
@ -1135,7 +1223,6 @@ def main():
except Exception, e: except Exception, e:
module.fail_json( module.fail_json(
msg="Fact gather failed with exception %s" % e) msg="Fact gather failed with exception %s" % e)
# Power Changes # Power Changes
elif state in ['powered_on', 'powered_off', 'restarted']: elif state in ['powered_on', 'powered_off', 'restarted']:
state_result = power_state(vm, state, force) state_result = power_state(vm, state, force)
@ -1183,6 +1270,17 @@ def main():
module.fail_json( module.fail_json(
msg="No such VM %s. Fact gathering requires an existing vm" msg="No such VM %s. Fact gathering requires an existing vm"
% guest) % guest)
elif from_template:
deploy_template(
vsphere_client=viserver,
esxi=esxi,
resource_pool=resource_pool,
guest=guest,
template_src=template_src,
module=module,
cluster_name=cluster
)
if state in ['restarted', 'reconfigured']: if state in ['restarted', 'reconfigured']:
module.fail_json( module.fail_json(
msg="No such VM %s. States [" msg="No such VM %s. States ["

View file

@ -18,6 +18,7 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import copy
import sys import sys
import datetime import datetime
import traceback import traceback
@ -99,12 +100,22 @@ EXAMPLES = '''
creates: /path/to/database creates: /path/to/database
''' '''
# Dict of options and their defaults
OPTIONS = {'chdir': None,
'creates': None,
'executable': None,
'NO_LOG': None,
'removes': None,
'warn': True,
}
# This is a pretty complex regex, which functions as follows: # This is a pretty complex regex, which functions as follows:
# #
# 1. (^|\s) # 1. (^|\s)
# ^ look for a space or the beginning of the line # ^ look for a space or the beginning of the line
# 2. (creates|removes|chdir|executable|NO_LOG)= # 2. ({options_list})=
# ^ look for a valid param, followed by an '=' # ^ expanded to (chdir|creates|executable...)=
# look for a valid param, followed by an '='
# 3. (?P<quote>[\'"])? # 3. (?P<quote>[\'"])?
# ^ look for an optional quote character, which can either be # ^ look for an optional quote character, which can either be
# a single or double quote character, and store it for later # a single or double quote character, and store it for later
@ -114,8 +125,12 @@ EXAMPLES = '''
# ^ a non-escaped space or a non-escaped quote of the same kind # ^ a non-escaped space or a non-escaped quote of the same kind
# that was matched in the first 'quote' is found, or the end of # that was matched in the first 'quote' is found, or the end of
# the line is reached # the line is reached
OPTIONS_REGEX = '|'.join(OPTIONS.keys())
PARAM_REGEX = re.compile(r'(^|\s)(creates|removes|chdir|executable|NO_LOG|warn)=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)') PARAM_REGEX = re.compile(
r'(^|\s)({options_regex})=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)'.format(
options_regex=OPTIONS_REGEX
)
)
def check_command(commandline): def check_command(commandline):
@ -148,7 +163,7 @@ def main():
args = module.params['args'] args = module.params['args']
creates = module.params['creates'] creates = module.params['creates']
removes = module.params['removes'] removes = module.params['removes']
warn = module.params.get('warn', True) warn = module.params['warn']
if args.strip() == '': if args.strip() == '':
module.fail_json(rc=256, msg="no command given") module.fail_json(rc=256, msg="no command given")
@ -232,13 +247,8 @@ class CommandModule(AnsibleModule):
def _load_params(self): def _load_params(self):
''' read the input and return a dictionary and the arguments string ''' ''' read the input and return a dictionary and the arguments string '''
args = MODULE_ARGS args = MODULE_ARGS
params = {} params = copy.copy(OPTIONS)
params['chdir'] = None
params['creates'] = None
params['removes'] = None
params['shell'] = False params['shell'] = False
params['executable'] = None
params['warn'] = True
if "#USE_SHELL" in args: if "#USE_SHELL" in args:
args = args.replace("#USE_SHELL", "") args = args.replace("#USE_SHELL", "")
params['shell'] = True params['shell'] = True
@ -250,13 +260,8 @@ class CommandModule(AnsibleModule):
if '=' in x and not quoted: if '=' in x and not quoted:
# check to see if this is a special parameter for the command # check to see if this is a special parameter for the command
k, v = x.split('=', 1) k, v = x.split('=', 1)
v = unquote(v) v = unquote(v.strip())
# because we're not breaking out quotes in the shlex split if k in OPTIONS.keys():
# above, the value of the k=v pair may still be quoted. If
# so, remove them.
if len(v) > 1 and (v.startswith('"') and v.endswith('"') or v.startswith("'") and v.endswith("'")):
v = v[1:-1]
if k in ('creates', 'removes', 'chdir', 'executable', 'NO_LOG'):
if k == "chdir": if k == "chdir":
v = os.path.abspath(os.path.expanduser(v)) v = os.path.abspath(os.path.expanduser(v))
if not (os.path.exists(v) and os.path.isdir(v)): if not (os.path.exists(v) and os.path.isdir(v)):

View file

View file

@ -118,7 +118,7 @@ def db_exists(cursor, db):
return bool(res) return bool(res)
def db_delete(cursor, db): def db_delete(cursor, db):
query = "DROP DATABASE `%s`" % db query = "DROP DATABASE %s" % mysql_quote_identifier(db, 'database')
cursor.execute(query) cursor.execute(query)
return True return True
@ -190,12 +190,14 @@ def db_import(module, host, user, password, db_name, target, port, socket=None):
return rc, stdout, stderr return rc, stdout, stderr
def db_create(cursor, db, encoding, collation): def db_create(cursor, db, encoding, collation):
query_params = dict(enc=encoding, collate=collation)
query = ['CREATE DATABASE %s' % mysql_quote_identifier(db, 'database')]
if encoding: if encoding:
encoding = " CHARACTER SET %s" % encoding query.append("CHARACTER SET %(enc)s")
if collation: if collation:
collation = " COLLATE %s" % collation query.append("COLLATE %(collate)s")
query = "CREATE DATABASE `%s`%s%s" % (db, encoding, collation) query = ' '.join(query)
res = cursor.execute(query) res = cursor.execute(query, query_params)
return True return True
def strip_quotes(s): def strip_quotes(s):
@ -360,4 +362,6 @@ def main():
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
main() from ansible.module_utils.database import *
if __name__ == '__main__':
main()

View file

@ -117,6 +117,9 @@ EXAMPLES = """
# Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION' # Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION'
- mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present - mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present
# Modifiy user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
- mysql_user: name=bob append=true priv=*.*:REQUIRESSL state=present
# Ensure no user named 'sally' exists, also passing in the auth credentials. # Ensure no user named 'sally' exists, also passing in the auth credentials.
- mysql_user: login_user=root login_password=123456 name=sally state=absent - mysql_user: login_user=root login_password=123456 name=sally state=absent
@ -151,6 +154,19 @@ except ImportError:
else: else:
mysqldb_found = True mysqldb_found = True
VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION',
'LOCK TABLES', 'REFERENCES', 'EVENT', 'ALTER',
'DELETE', 'INDEX', 'INSERT', 'SELECT', 'UPDATE',
'CREATE TEMPORARY TABLES', 'TRIGGER', 'CREATE VIEW',
'SHOW VIEW', 'ALTER ROUTINE', 'CREATE ROUTINE',
'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER',
'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT',
'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN',
'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL'))
class InvalidPrivsError(Exception):
pass
# =========================================== # ===========================================
# MySQL module specific support methods. # MySQL module specific support methods.
# #
@ -171,7 +187,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs):
changed = False changed = False
grant_option = False grant_option = False
# Handle passwords. # Handle passwords
if password is not None: if password is not None:
cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host)) cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host))
current_pass_hash = cursor.fetchone() current_pass_hash = cursor.fetchone()
@ -181,7 +197,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs):
cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password)) cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password))
changed = True changed = True
# Handle privileges. # Handle privileges
if new_priv is not None: if new_priv is not None:
curr_priv = privileges_get(cursor, user,host) curr_priv = privileges_get(cursor, user,host)
@ -217,7 +233,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs):
return changed return changed
def user_delete(cursor, user, host): def user_delete(cursor, user, host):
cursor.execute("DROP USER %s@%s", (user,host)) cursor.execute("DROP USER %s@%s", (user, host))
return True return True
def privileges_get(cursor, user,host): def privileges_get(cursor, user,host):
@ -231,7 +247,7 @@ def privileges_get(cursor, user,host):
The dictionary format is the same as that returned by privileges_unpack() below. The dictionary format is the same as that returned by privileges_unpack() below.
""" """
output = {} output = {}
cursor.execute("SHOW GRANTS FOR %s@%s", (user,host)) cursor.execute("SHOW GRANTS FOR %s@%s", (user, host))
grants = cursor.fetchall() grants = cursor.fetchall()
def pick(x): def pick(x):
@ -243,11 +259,13 @@ def privileges_get(cursor, user,host):
for grant in grants: for grant in grants:
res = re.match("GRANT (.+) ON (.+) TO '.+'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0]) res = re.match("GRANT (.+) ON (.+) TO '.+'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0])
if res is None: if res is None:
module.fail_json(msg="unable to parse the MySQL grant string") raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0])
privileges = res.group(1).split(", ") privileges = res.group(1).split(", ")
privileges = [ pick(x) for x in privileges] privileges = [ pick(x) for x in privileges]
if "WITH GRANT OPTION" in res.group(4): if "WITH GRANT OPTION" in res.group(4):
privileges.append('GRANT') privileges.append('GRANT')
if "REQUIRE SSL" in res.group(4):
privileges.append('REQUIRESSL')
db = res.group(2) db = res.group(2)
output[db] = privileges output[db] = privileges
return output return output
@ -264,8 +282,8 @@ def privileges_unpack(priv):
not specified in the string, as MySQL will always provide this by default. not specified in the string, as MySQL will always provide this by default.
""" """
output = {} output = {}
for item in priv.split('/'): for item in priv.strip().split('/'):
pieces = item.split(':') pieces = item.strip().split(':')
if '.' in pieces[0]: if '.' in pieces[0]:
pieces[0] = pieces[0].split('.') pieces[0] = pieces[0].split('.')
for idx, piece in enumerate(pieces): for idx, piece in enumerate(pieces):
@ -274,27 +292,46 @@ def privileges_unpack(priv):
pieces[0] = '.'.join(pieces[0]) pieces[0] = '.'.join(pieces[0])
output[pieces[0]] = pieces[1].upper().split(',') output[pieces[0]] = pieces[1].upper().split(',')
new_privs = frozenset(output[pieces[0]])
if not new_privs.issubset(VALID_PRIVS):
raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS))
if '*.*' not in output: if '*.*' not in output:
output['*.*'] = ['USAGE'] output['*.*'] = ['USAGE']
# if we are only specifying something like REQUIRESSL in *.* we still need
# to add USAGE as a privilege to avoid syntax errors
if priv.find('REQUIRESSL') != -1 and 'USAGE' not in output['*.*']:
output['*.*'].append('USAGE')
return output return output
def privileges_revoke(cursor, user,host,db_table,grant_option): def privileges_revoke(cursor, user,host,db_table,grant_option):
# Escape '%' since mysql db.execute() uses a format string
db_table = db_table.replace('%', '%%')
if grant_option: if grant_option:
query = "REVOKE GRANT OPTION ON %s FROM '%s'@'%s'" % (db_table,user,host) query = ["REVOKE GRANT OPTION ON %s" % mysql_quote_identifier(db_table, 'table')]
cursor.execute(query) query.append("FROM %s@%s")
query = "REVOKE ALL PRIVILEGES ON %s FROM '%s'@'%s'" % (db_table,user,host) query = ' '.join(query)
cursor.execute(query) cursor.execute(query, (user, host))
query = ["REVOKE ALL PRIVILEGES ON %s" % mysql_quote_identifier(db_table, 'table')]
query.append("FROM %s@%s")
query = ' '.join(query)
cursor.execute(query, (user, host))
def privileges_grant(cursor, user,host,db_table,priv): def privileges_grant(cursor, user,host,db_table,priv):
# Escape '%' since mysql db.execute uses a format string and the
priv_string = ",".join(filter(lambda x: x != 'GRANT', priv)) # specification of db and table often use a % (SQL wildcard)
query = "GRANT %s ON %s TO '%s'@'%s'" % (priv_string,db_table,user,host) db_table = db_table.replace('%', '%%')
priv_string = ",".join(filter(lambda x: x not in [ 'GRANT', 'REQUIRESSL' ], priv))
query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))]
query.append("TO %s@%s")
if 'GRANT' in priv: if 'GRANT' in priv:
query = query + " WITH GRANT OPTION" query.append("WITH GRANT OPTION")
cursor.execute(query) if 'REQUIRESSL' in priv:
query.append("REQUIRE SSL")
query = ' '.join(query)
cursor.execute(query, (user, host))
def strip_quotes(s): def strip_quotes(s):
""" Remove surrounding single or double quotes """ Remove surrounding single or double quotes
@ -425,8 +462,8 @@ def main():
if priv is not None: if priv is not None:
try: try:
priv = privileges_unpack(priv) priv = privileges_unpack(priv)
except: except Exception, e:
module.fail_json(msg="invalid privileges string") module.fail_json(msg="invalid privileges string: %s" % str(e))
# Either the caller passes both a username and password with which to connect to # Either the caller passes both a username and password with which to connect to
# mysql, or they pass neither and allow this module to read the credentials from # mysql, or they pass neither and allow this module to read the credentials from
@ -459,11 +496,17 @@ def main():
if state == "present": if state == "present":
if user_exists(cursor, user, host): if user_exists(cursor, user, host):
try:
changed = user_mod(cursor, user, host, password, priv, append_privs) changed = user_mod(cursor, user, host, password, priv, append_privs)
except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e:
module.fail_json(msg=str(e))
else: else:
if password is None: if password is None:
module.fail_json(msg="password parameter required when adding a user") module.fail_json(msg="password parameter required when adding a user")
try:
changed = user_add(cursor, user, host, password, priv) changed = user_add(cursor, user, host, password, priv)
except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e:
module.fail_json(msg=str(e))
elif state == "absent": elif state == "absent":
if user_exists(cursor, user, host): if user_exists(cursor, user, host):
changed = user_delete(cursor, user, host) changed = user_delete(cursor, user, host)
@ -473,4 +516,6 @@ def main():
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
main() from ansible.module_utils.database import *
if __name__ == '__main__':
main()

View file

@ -103,7 +103,7 @@ def typedvalue(value):
def getvariable(cursor, mysqlvar): def getvariable(cursor, mysqlvar):
cursor.execute("SHOW VARIABLES LIKE '" + mysqlvar + "'") cursor.execute("SHOW VARIABLES LIKE %s", (mysqlvar,))
mysqlvar_val = cursor.fetchall() mysqlvar_val = cursor.fetchall()
return mysqlvar_val return mysqlvar_val
@ -116,8 +116,11 @@ def setvariable(cursor, mysqlvar, value):
should be passed as numeric literals. should be passed as numeric literals.
""" """
query = ["SET GLOBAL %s" % mysql_quote_identifier(mysqlvar, 'vars') ]
query.append(" = %s")
query = ' '.join(query)
try: try:
cursor.execute("SET GLOBAL " + mysqlvar + " = %s", (value,)) cursor.execute(query, (value,))
cursor.fetchall() cursor.fetchall()
result = True result = True
except Exception, e: except Exception, e:
@ -242,7 +245,10 @@ def main():
value_actual = typedvalue(mysqlvar_val[0][1]) value_actual = typedvalue(mysqlvar_val[0][1])
if value_wanted == value_actual: if value_wanted == value_actual:
module.exit_json(msg="Variable already set to requested value", changed=False) module.exit_json(msg="Variable already set to requested value", changed=False)
try:
result = setvariable(cursor, mysqlvar, value_wanted) result = setvariable(cursor, mysqlvar, value_wanted)
except SQLParseError, e:
result = str(e)
if result is True: if result is True:
module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True) module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True)
else: else:
@ -250,4 +256,5 @@ def main():
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
from ansible.module_utils.database import *
main() main()

View file

View file

@ -44,6 +44,11 @@ options:
- Host running the database - Host running the database
required: false required: false
default: localhost default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections
required: false
default: null
owner: owner:
description: description:
- Name of the role to set as owner of the database - Name of the role to set as owner of the database
@ -124,7 +129,9 @@ class NotSupportedError(Exception):
# #
def set_owner(cursor, db, owner): def set_owner(cursor, db, owner):
query = "ALTER DATABASE \"%s\" OWNER TO \"%s\"" % (db, owner) query = "ALTER DATABASE %s OWNER TO %s" % (
pg_quote_identifier(db, 'database'),
pg_quote_identifier(owner, 'role'))
cursor.execute(query) cursor.execute(query)
return True return True
@ -141,7 +148,7 @@ def get_db_info(cursor, db):
FROM pg_database JOIN pg_roles ON pg_roles.oid = pg_database.datdba FROM pg_database JOIN pg_roles ON pg_roles.oid = pg_database.datdba
WHERE datname = %(db)s WHERE datname = %(db)s
""" """
cursor.execute(query, {'db':db}) cursor.execute(query, {'db': db})
return cursor.fetchone() return cursor.fetchone()
def db_exists(cursor, db): def db_exists(cursor, db):
@ -151,28 +158,28 @@ def db_exists(cursor, db):
def db_delete(cursor, db): def db_delete(cursor, db):
if db_exists(cursor, db): if db_exists(cursor, db):
query = "DROP DATABASE \"%s\"" % db query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database')
cursor.execute(query) cursor.execute(query)
return True return True
else: else:
return False return False
def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype): def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype):
params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype)
if not db_exists(cursor, db): if not db_exists(cursor, db):
query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')]
if owner: if owner:
owner = " OWNER \"%s\"" % owner query_fragments.append('OWNER %s' % pg_quote_identifier(owner, 'role'))
if template: if template:
template = " TEMPLATE \"%s\"" % template query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database'))
if encoding: if encoding:
encoding = " ENCODING '%s'" % encoding query_fragments.append('ENCODING %(enc)s')
if lc_collate: if lc_collate:
lc_collate = " LC_COLLATE '%s'" % lc_collate query_fragments.append('LC_COLLATE %(collate)s')
if lc_ctype: if lc_ctype:
lc_ctype = " LC_CTYPE '%s'" % lc_ctype query_fragments.append('LC_CTYPE %(ctype)s')
query = 'CREATE DATABASE "%s"%s%s%s%s%s' % (db, owner, query = ' '.join(query_fragments)
template, encoding, cursor.execute(query, params)
lc_collate, lc_ctype)
cursor.execute(query)
return True return True
else: else:
db_info = get_db_info(cursor, db) db_info = get_db_info(cursor, db)
@ -224,6 +231,7 @@ def main():
login_user=dict(default="postgres"), login_user=dict(default="postgres"),
login_password=dict(default=""), login_password=dict(default=""),
login_host=dict(default=""), login_host=dict(default=""),
login_unix_socket=dict(default=""),
port=dict(default="5432"), port=dict(default="5432"),
db=dict(required=True, aliases=['name']), db=dict(required=True, aliases=['name']),
owner=dict(default=""), owner=dict(default=""),
@ -260,6 +268,12 @@ def main():
} }
kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
if k in params_map and v != '' ) if k in params_map and v != '' )
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
try: try:
db_connection = psycopg2.connect(database="template1", **kw) db_connection = psycopg2.connect(database="template1", **kw)
# Enable autocommit so we can create databases # Enable autocommit so we can create databases
@ -284,13 +298,22 @@ def main():
module.exit_json(changed=changed,db=db) module.exit_json(changed=changed,db=db)
if state == "absent": if state == "absent":
try:
changed = db_delete(cursor, db) changed = db_delete(cursor, db)
except SQLParseError, e:
module.fail_json(msg=str(e))
elif state == "present": elif state == "present":
try:
changed = db_create(cursor, db, owner, template, encoding, changed = db_create(cursor, db, owner, template, encoding,
lc_collate, lc_ctype) lc_collate, lc_ctype)
except SQLParseError, e:
module.fail_json(msg=str(e))
except NotSupportedError, e: except NotSupportedError, e:
module.fail_json(msg=str(e)) module.fail_json(msg=str(e))
except SystemExit:
# Avoid catching this on Python 2.4
raise
except Exception, e: except Exception, e:
module.fail_json(msg="Database query failed: %s" % e) module.fail_json(msg="Database query failed: %s" % e)
@ -298,4 +321,6 @@ def main():
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
main() from ansible.module_utils.database import *
if __name__ == '__main__':
main()

View file

@ -99,6 +99,12 @@ options:
- Database port to connect to. - Database port to connect to.
required: no required: no
default: 5432 default: 5432
unix_socket:
description:
- Path to a Unix domain socket for local connections.
- 'Alias: I(login_unix_socket)'
required: false
default: null
login: login:
description: description:
- The username to authenticate with. - The username to authenticate with.
@ -230,6 +236,9 @@ except ImportError:
psycopg2 = None psycopg2 = None
VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL', 'USAGE'))
class Error(Exception): class Error(Exception):
pass pass
@ -264,6 +273,12 @@ class Connection(object):
} }
kw = dict( (params_map[k], getattr(params, k)) for k in params_map kw = dict( (params_map[k], getattr(params, k)) for k in params_map
if getattr(params, k) != '' ) if getattr(params, k) != '' )
# If a unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and params.unix_socket != "":
kw["host"] = params.unix_socket
self.connection = psycopg2.connect(**kw) self.connection = psycopg2.connect(**kw)
self.cursor = self.connection.cursor() self.cursor = self.connection.cursor()
@ -454,19 +469,21 @@ class Connection(object):
else: else:
obj_ids = ['"%s"' % o for o in objs] obj_ids = ['"%s"' % o for o in objs]
# set_what: SQL-fragment specifying what to set for the target roless: # set_what: SQL-fragment specifying what to set for the target roles:
# Either group membership or privileges on objects of a certain type. # Either group membership or privileges on objects of a certain type
if obj_type == 'group': if obj_type == 'group':
set_what = ','.join(obj_ids) set_what = ','.join(pg_quote_identifier(i, 'role') for i in obj_ids)
else: else:
# Note: obj_type has been checked against a set of string literals
# and privs was escaped when it was parsed
set_what = '%s ON %s %s' % (','.join(privs), obj_type, set_what = '%s ON %s %s' % (','.join(privs), obj_type,
','.join(obj_ids)) ','.join(pg_quote_identifier(i, 'table') for i in obj_ids))
# for_whom: SQL-fragment specifying for whom to set the above # for_whom: SQL-fragment specifying for whom to set the above
if roles == 'PUBLIC': if roles == 'PUBLIC':
for_whom = 'PUBLIC' for_whom = 'PUBLIC'
else: else:
for_whom = ','.join(['"%s"' % r for r in roles]) for_whom = ','.join(pg_quote_identifier(r, 'role') for r in roles)
status_before = get_status(objs) status_before = get_status(objs)
if state == 'present': if state == 'present':
@ -515,6 +532,7 @@ def main():
aliases=['admin_option']), aliases=['admin_option']),
host=dict(default='', aliases=['login_host']), host=dict(default='', aliases=['login_host']),
port=dict(type='int', default=5432), port=dict(type='int', default=5432),
unix_socket=dict(default='', aliases=['login_unix_socket']),
login=dict(default='postgres', aliases=['login_user']), login=dict(default='postgres', aliases=['login_user']),
password=dict(default='', aliases=['login_password']) password=dict(default='', aliases=['login_password'])
), ),
@ -558,7 +576,9 @@ def main():
try: try:
# privs # privs
if p.privs: if p.privs:
privs = p.privs.split(',') privs = frozenset(pr.upper() for pr in p.privs.split(','))
if not privs.issubset(VALID_PRIVS):
module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS))
else: else:
privs = None privs = None
@ -610,4 +630,6 @@ def main():
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
main() from ansible.module_utils.database import *
if __name__ == '__main__':
main()

View file

@ -78,6 +78,11 @@ options:
- Host running PostgreSQL. - Host running PostgreSQL.
required: false required: false
default: localhost default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections
required: false
default: null
priv: priv:
description: description:
- "PostgreSQL privileges string in the format: C(table:priv1,priv2)" - "PostgreSQL privileges string in the format: C(table:priv1,priv2)"
@ -145,6 +150,7 @@ INSERT,UPDATE/table:SELECT/anothertable:ALL
''' '''
import re import re
import itertools
try: try:
import psycopg2 import psycopg2
@ -153,6 +159,19 @@ except ImportError:
else: else:
postgresqldb_found = True postgresqldb_found = True
_flags = ('SUPERUSER', 'CREATEROLE', 'CREATEUSER', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' % f for f in _flags)))
VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL', 'USAGE')),
database=frozenset(('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL', 'USAGE')),
)
class InvalidFlagsError(Exception):
pass
class InvalidPrivsError(Exception):
pass
# =========================================== # ===========================================
# PostgreSQL module specific support methods. # PostgreSQL module specific support methods.
# #
@ -169,15 +188,16 @@ def user_exists(cursor, user):
def user_add(cursor, user, password, role_attr_flags, encrypted, expires): def user_add(cursor, user, password, role_attr_flags, encrypted, expires):
"""Create a new database user (role).""" """Create a new database user (role)."""
query_password_data = dict() # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal
query = 'CREATE USER "%(user)s"' % { "user": user} query_password_data = dict(password=password, expires=expires)
query = ['CREATE USER %(user)s' % { "user": pg_quote_identifier(user, 'role')}]
if password is not None: if password is not None:
query = query + " WITH %(crypt)s" % { "crypt": encrypted } query.append("WITH %(crypt)s" % { "crypt": encrypted })
query = query + " PASSWORD %(password)s" query.append("PASSWORD %(password)s")
query_password_data.update(password=password)
if expires is not None: if expires is not None:
query = query + " VALID UNTIL '%(expires)s'" % { "expires": expires } query.append("VALID UNTIL %(expires)s")
query = query + " " + role_attr_flags query.append(role_attr_flags)
query = ' '.join(query)
cursor.execute(query, query_password_data) cursor.execute(query, query_password_data)
return True return True
@ -185,6 +205,7 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir
"""Change user password and/or attributes. Return True if changed, False otherwise.""" """Change user password and/or attributes. Return True if changed, False otherwise."""
changed = False changed = False
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal
if user == 'PUBLIC': if user == 'PUBLIC':
if password is not None: if password is not None:
module.fail_json(msg="cannot change the password for PUBLIC user") module.fail_json(msg="cannot change the password for PUBLIC user")
@ -196,25 +217,24 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir
# Handle passwords. # Handle passwords.
if password is not None or role_attr_flags is not None: if password is not None or role_attr_flags is not None:
# Select password and all flag-like columns in order to verify changes. # Select password and all flag-like columns in order to verify changes.
query_password_data = dict() query_password_data = dict(password=password, expires=expires)
select = "SELECT * FROM pg_authid where rolname=%(user)s" select = "SELECT * FROM pg_authid where rolname=%(user)s"
cursor.execute(select, {"user": user}) cursor.execute(select, {"user": user})
# Grab current role attributes. # Grab current role attributes.
current_role_attrs = cursor.fetchone() current_role_attrs = cursor.fetchone()
alter = 'ALTER USER "%(user)s"' % {"user": user} alter = ['ALTER USER %(user)s' % {"user": pg_quote_identifier(user, 'role')}]
if password is not None: if password is not None:
query_password_data.update(password=password) alter.append("WITH %(crypt)s" % {"crypt": encrypted})
alter = alter + " WITH %(crypt)s" % {"crypt": encrypted} alter.append("PASSWORD %(password)s")
alter = alter + " PASSWORD %(password)s" alter.append(role_attr_flags)
alter = alter + " %(flags)s" % {'flags': role_attr_flags}
elif role_attr_flags: elif role_attr_flags:
alter = alter + ' WITH ' + role_attr_flags alter.append('WITH %s' % role_attr_flags)
if expires is not None: if expires is not None:
alter = alter + " VALID UNTIL '%(expires)s'" % { "exipres": expires } alter.append("VALID UNTIL %(expires)s")
try: try:
cursor.execute(alter, query_password_data) cursor.execute(' '.join(alter), query_password_data)
except psycopg2.InternalError, e: except psycopg2.InternalError, e:
if e.pgcode == '25006': if e.pgcode == '25006':
# Handle errors due to read-only transactions indicated by pgcode 25006 # Handle errors due to read-only transactions indicated by pgcode 25006
@ -240,7 +260,7 @@ def user_delete(cursor, user):
"""Try to remove a user. Returns True if successful otherwise False""" """Try to remove a user. Returns True if successful otherwise False"""
cursor.execute("SAVEPOINT ansible_pgsql_user_delete") cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
try: try:
cursor.execute("DROP USER \"%s\"" % user) cursor.execute("DROP USER %s" % pg_quote_identifier(user, 'role'))
except: except:
cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete") cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
@ -264,36 +284,20 @@ def get_table_privileges(cursor, user, table):
cursor.execute(query, (user, table, schema)) cursor.execute(query, (user, table, schema))
return set([x[0] for x in cursor.fetchall()]) return set([x[0] for x in cursor.fetchall()])
def quote_pg_identifier(identifier):
"""
quote postgresql identifiers involving zero or more namespaces
"""
if '"' in identifier:
# the user has supplied their own quoting. we have to hope they're
# doing it right. Maybe they have an unfortunately named table
# containing a period in the name, such as: "public"."users.2013"
return identifier
tokens = identifier.strip().split(".")
quoted_tokens = []
for token in tokens:
quoted_tokens.append('"%s"' % (token, ))
return ".".join(quoted_tokens)
def grant_table_privilege(cursor, user, table, priv): def grant_table_privilege(cursor, user, table, priv):
# Note: priv escaped by parse_privs
prev_priv = get_table_privileges(cursor, user, table) prev_priv = get_table_privileges(cursor, user, table)
query = 'GRANT %s ON TABLE %s TO %s' % ( query = 'GRANT %s ON TABLE %s TO %s' % (
priv, quote_pg_identifier(table), quote_pg_identifier(user), ) priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
cursor.execute(query) cursor.execute(query)
curr_priv = get_table_privileges(cursor, user, table) curr_priv = get_table_privileges(cursor, user, table)
return len(curr_priv) > len(prev_priv) return len(curr_priv) > len(prev_priv)
def revoke_table_privilege(cursor, user, table, priv): def revoke_table_privilege(cursor, user, table, priv):
# Note: priv escaped by parse_privs
prev_priv = get_table_privileges(cursor, user, table) prev_priv = get_table_privileges(cursor, user, table)
query = 'REVOKE %s ON TABLE %s FROM %s' % ( query = 'REVOKE %s ON TABLE %s FROM %s' % (
priv, quote_pg_identifier(table), quote_pg_identifier(user), ) priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
cursor.execute(query) cursor.execute(query)
curr_priv = get_table_privileges(cursor, user, table) curr_priv = get_table_privileges(cursor, user, table)
return len(curr_priv) < len(prev_priv) return len(curr_priv) < len(prev_priv)
@ -324,21 +328,29 @@ def has_database_privilege(cursor, user, db, priv):
return cursor.fetchone()[0] return cursor.fetchone()[0]
def grant_database_privilege(cursor, user, db, priv): def grant_database_privilege(cursor, user, db, priv):
# Note: priv escaped by parse_privs
prev_priv = get_database_privileges(cursor, user, db) prev_priv = get_database_privileges(cursor, user, db)
if user == "PUBLIC": if user == "PUBLIC":
query = 'GRANT %s ON DATABASE \"%s\" TO PUBLIC' % (priv, db) query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
priv, pg_quote_identifier(db, 'database'))
else: else:
query = 'GRANT %s ON DATABASE \"%s\" TO \"%s\"' % (priv, db, user) query = 'GRANT %s ON DATABASE %s TO %s' % (
priv, pg_quote_identifier(db, 'database'),
pg_quote_identifier(user, 'role'))
cursor.execute(query) cursor.execute(query)
curr_priv = get_database_privileges(cursor, user, db) curr_priv = get_database_privileges(cursor, user, db)
return len(curr_priv) > len(prev_priv) return len(curr_priv) > len(prev_priv)
def revoke_database_privilege(cursor, user, db, priv): def revoke_database_privilege(cursor, user, db, priv):
# Note: priv escaped by parse_privs
prev_priv = get_database_privileges(cursor, user, db) prev_priv = get_database_privileges(cursor, user, db)
if user == "PUBLIC": if user == "PUBLIC":
query = 'REVOKE %s ON DATABASE \"%s\" FROM PUBLIC' % (priv, db) query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
priv, pg_quote_identifier(db, 'database'))
else: else:
query = 'REVOKE %s ON DATABASE \"%s\" FROM \"%s\"' % (priv, db, user) query = 'REVOKE %s ON DATABASE %s FROM %s' % (
priv, pg_quote_identifier(db, 'database'),
pg_quote_identifier(user, 'role'))
cursor.execute(query) cursor.execute(query)
curr_priv = get_database_privileges(cursor, user, db) curr_priv = get_database_privileges(cursor, user, db)
return len(curr_priv) < len(prev_priv) return len(curr_priv) < len(prev_priv)
@ -387,11 +399,20 @@ def parse_role_attrs(role_attr_flags):
Where: Where:
attributes := CREATEDB,CREATEROLE,NOSUPERUSER,... attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
[ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB",
"[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ]
""" """
if ',' not in role_attr_flags: if ',' in role_attr_flags:
return role_attr_flags flag_set = frozenset(r.upper() for r in role_attr_flags.split(","))
flag_set = role_attr_flags.split(",") elif role_attr_flags:
o_flags = " ".join(flag_set) flag_set = frozenset((role_attr_flags.upper(),))
else:
flag_set = frozenset()
if not flag_set.issubset(VALID_FLAGS):
raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
' '.join(flag_set.difference(VALID_FLAGS)))
o_flags = ' '.join(flag_set)
return o_flags return o_flags
def parse_privs(privs, db): def parse_privs(privs, db):
@ -417,12 +438,15 @@ def parse_privs(privs, db):
if ':' not in token: if ':' not in token:
type_ = 'database' type_ = 'database'
name = db name = db
priv_set = set(x.strip() for x in token.split(',')) priv_set = frozenset(x.strip().upper() for x in token.split(',') if x.strip())
else: else:
type_ = 'table' type_ = 'table'
name, privileges = token.split(':', 1) name, privileges = token.split(':', 1)
priv_set = set(x.strip() for x in privileges.split(',')) priv_set = frozenset(x.strip().upper() for x in privileges.split(',') if x.strip())
if not priv_set.issubset(VALID_PRIVS[type_]):
raise InvalidPrivsError('Invalid privs specified for %s: %s' %
(type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
o_privs[type_][name] = priv_set o_privs[type_][name] = priv_set
return o_privs return o_privs
@ -437,6 +461,7 @@ def main():
login_user=dict(default="postgres"), login_user=dict(default="postgres"),
login_password=dict(default=""), login_password=dict(default=""),
login_host=dict(default=""), login_host=dict(default=""),
login_unix_socket=dict(default=""),
user=dict(required=True, aliases=['name']), user=dict(required=True, aliases=['name']),
password=dict(default=None), password=dict(default=None),
state=dict(default="present", choices=["absent", "present"]), state=dict(default="present", choices=["absent", "present"]),
@ -460,7 +485,10 @@ def main():
module.fail_json(msg="privileges require a database to be specified") module.fail_json(msg="privileges require a database to be specified")
privs = parse_privs(module.params["priv"], db) privs = parse_privs(module.params["priv"], db)
port = module.params["port"] port = module.params["port"]
try:
role_attr_flags = parse_role_attrs(module.params["role_attr_flags"]) role_attr_flags = parse_role_attrs(module.params["role_attr_flags"])
except InvalidFlagsError, e:
module.fail_json(msg=str(e))
if module.params["encrypted"]: if module.params["encrypted"]:
encrypted = "ENCRYPTED" encrypted = "ENCRYPTED"
else: else:
@ -482,6 +510,12 @@ def main():
} }
kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
if k in params_map and v != "" ) if k in params_map and v != "" )
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
try: try:
db_connection = psycopg2.connect(**kw) db_connection = psycopg2.connect(**kw)
cursor = db_connection.cursor() cursor = db_connection.cursor()
@ -494,18 +528,30 @@ def main():
if state == "present": if state == "present":
if user_exists(cursor, user): if user_exists(cursor, user):
try:
changed = user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires) changed = user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires)
except SQLParseError, e:
module.fail_json(msg=str(e))
else: else:
try:
changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires) changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires)
except SQLParseError, e:
module.fail_json(msg=str(e))
try:
changed = grant_privileges(cursor, user, privs) or changed changed = grant_privileges(cursor, user, privs) or changed
except SQLParseError, e:
module.fail_json(msg=str(e))
else: else:
if user_exists(cursor, user): if user_exists(cursor, user):
if module.check_mode: if module.check_mode:
changed = True changed = True
kw['user_removed'] = True kw['user_removed'] = True
else: else:
try:
changed = revoke_privileges(cursor, user, privs) changed = revoke_privileges(cursor, user, privs)
user_removed = user_delete(cursor, user) user_removed = user_delete(cursor, user)
except SQLParseError, e:
module.fail_json(msg=str(e))
changed = changed or user_removed changed = changed or user_removed
if fail_on_user and not user_removed: if fail_on_user and not user_removed:
msg = "unable to remove user" msg = "unable to remove user"
@ -523,4 +569,5 @@ def main():
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
from ansible.module_utils.database import *
main() main()

View file

@ -153,8 +153,9 @@ def main():
) )
changed = False changed = False
pathmd5 = None path_md5 = None # Deprecated
destmd5 = None path_hash = None
dest_hash = None
src = os.path.expanduser(module.params['src']) src = os.path.expanduser(module.params['src'])
dest = os.path.expanduser(module.params['dest']) dest = os.path.expanduser(module.params['dest'])
backup = module.params['backup'] backup = module.params['backup']
@ -175,23 +176,29 @@ def main():
module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp)) module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp))
path = assemble_from_fragments(src, delimiter, compiled_regexp) path = assemble_from_fragments(src, delimiter, compiled_regexp)
pathmd5 = module.md5(path) path_hash = module.sha1(path)
if os.path.exists(dest): if os.path.exists(dest):
destmd5 = module.md5(dest) dest_hash = module.sha1(dest)
if pathmd5 != destmd5: if path_hash != dest_hash:
if backup and destmd5 is not None: if backup and dest_hash is not None:
module.backup_local(dest) module.backup_local(dest)
shutil.copy(path, dest) shutil.copy(path, dest)
changed = True changed = True
# Backwards compat. This won't return data if FIPS mode is active
try:
pathmd5 = module.md5(path)
except ValueError:
pathmd5 = None
os.remove(path) os.remove(path)
file_args = module.load_file_common_arguments(module.params) file_args = module.load_file_common_arguments(module.params)
changed = module.set_fs_attributes_if_different(file_args, changed) changed = module.set_fs_attributes_if_different(file_args, changed)
# Mission complete # Mission complete
module.exit_json(src=src, dest=dest, md5sum=pathmd5, changed=changed, msg="OK") module.exit_json(src=src, dest=dest, md5sum=pathmd5, checksum=path_hash, changed=changed, msg="OK")
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import *

View file

@ -27,7 +27,7 @@ module: copy
version_added: "historical" version_added: "historical"
short_description: Copies files to remote locations. short_description: Copies files to remote locations.
description: description:
- The M(copy) module copies a file on the local box to remote locations. - The M(copy) module copies a file on the local box to remote locations. Use the M(fetch) module to copy files from remote locations to the local box.
options: options:
src: src:
description: description:
@ -167,8 +167,13 @@ def main():
if not os.access(src, os.R_OK): if not os.access(src, os.R_OK):
module.fail_json(msg="Source %s not readable" % (src)) module.fail_json(msg="Source %s not readable" % (src))
checksum_src = module.sha1(src)
checksum_dest = None
# Backwards compat only. This will be None in FIPS mode
try:
md5sum_src = module.md5(src) md5sum_src = module.md5(src)
md5sum_dest = None except ValueError:
md5sum_src = None
changed = False changed = False
@ -176,7 +181,7 @@ def main():
if original_basename and dest.endswith("/"): if original_basename and dest.endswith("/"):
dest = os.path.join(dest, original_basename) dest = os.path.join(dest, original_basename)
dirname = os.path.dirname(dest) dirname = os.path.dirname(dest)
if not os.path.exists(dirname): if not os.path.exists(dirname) and '/' in dirname:
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname) (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname)
os.makedirs(dirname) os.makedirs(dirname)
directory_args = module.load_file_common_arguments(module.params) directory_args = module.load_file_common_arguments(module.params)
@ -198,7 +203,7 @@ def main():
basename = original_basename basename = original_basename
dest = os.path.join(dest, basename) dest = os.path.join(dest, basename)
if os.access(dest, os.R_OK): if os.access(dest, os.R_OK):
md5sum_dest = module.md5(dest) checksum_dest = module.sha1(dest)
else: else:
if not os.path.exists(os.path.dirname(dest)): if not os.path.exists(os.path.dirname(dest)):
try: try:
@ -215,7 +220,7 @@ def main():
module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest))) module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest)))
backup_file = None backup_file = None
if md5sum_src != md5sum_dest or os.path.islink(dest): if checksum_src != checksum_dest or os.path.islink(dest):
try: try:
if backup: if backup:
if os.path.exists(dest): if os.path.exists(dest):
@ -238,7 +243,7 @@ def main():
changed = False changed = False
res_args = dict( res_args = dict(
dest = dest, src = src, md5sum = md5sum_src, changed = changed dest = dest, src = src, md5sum = md5sum_src, checksum = checksum_src, changed = changed
) )
if backup_file: if backup_file:
res_args['backup_file'] = backup_file res_args['backup_file'] = backup_file

View file

@ -34,13 +34,14 @@ options:
required: false required: false
choices: [ "yes", "no" ] choices: [ "yes", "no" ]
default: "no" default: "no"
validate_md5: validate_checksum:
version_added: "1.4" version_added: "1.4"
description: description:
- Verify that the source and destination md5sums match after the files are fetched. - Verify that the source and destination checksums match after the files are fetched.
required: false required: false
choices: [ "yes", "no" ] choices: [ "yes", "no" ]
default: "yes" default: "yes"
aliases: [ "validate_md5" ]
flat: flat:
version_added: "1.2" version_added: "1.2"
description: description:

View file

@ -103,6 +103,23 @@ EXAMPLES = '''
''' '''
def get_state(path):
''' Find out current state '''
if os.path.lexists(path):
if os.path.islink(path):
return 'link'
elif os.path.isdir(path):
return 'directory'
elif os.stat(path).st_nlink > 1:
return 'hard'
else:
# could be many other things, but defaulting to file
return 'file'
return 'absent'
def main(): def main():
module = AnsibleModule( module = AnsibleModule(
@ -143,18 +160,7 @@ def main():
pass pass
module.exit_json(path=path, changed=False, appears_binary=appears_binary) module.exit_json(path=path, changed=False, appears_binary=appears_binary)
# Find out current state prev_state = get_state(path)
prev_state = 'absent'
if os.path.lexists(path):
if os.path.islink(path):
prev_state = 'link'
elif os.path.isdir(path):
prev_state = 'directory'
elif os.stat(path).st_nlink > 1:
prev_state = 'hard'
else:
# could be many other things, but defaulting to file
prev_state = 'file'
# state should default to file, but since that creates many conflicts, # state should default to file, but since that creates many conflicts,
# default to 'current' when it exists. # default to 'current' when it exists.
@ -168,22 +174,24 @@ def main():
# or copy module, even if this module never uses it, it is needed to key off some things # or copy module, even if this module never uses it, it is needed to key off some things
if src is not None: if src is not None:
src = os.path.expanduser(src) src = os.path.expanduser(src)
# original_basename is used by other modules that depend on file.
if os.path.isdir(path) and state not in ["link", "absent"]:
if params['original_basename']:
basename = params['original_basename']
else:
basename = os.path.basename(src)
params['path'] = path = os.path.join(path, basename)
else: else:
if state in ['link','hard']: if state in ['link','hard']:
if follow: if follow and state == 'link':
# use the current target of the link as the source # use the current target of the link as the source
src = os.readlink(path) src = os.readlink(path)
else: else:
module.fail_json(msg='src and dest are required for creating links') module.fail_json(msg='src and dest are required for creating links')
# original_basename is used by other modules that depend on file.
if os.path.isdir(path) and state not in ["link", "absent"]:
basename = None
if params['original_basename']:
basename = params['original_basename']
elif src is not None:
basename = os.path.basename(src)
if basename:
params['path'] = path = os.path.join(path, basename)
# make sure the target path is a directory when we're doing a recursive operation # make sure the target path is a directory when we're doing a recursive operation
recurse = params['recurse'] recurse = params['recurse']
if recurse and state != 'directory': if recurse and state != 'directory':
@ -210,7 +218,15 @@ def main():
module.exit_json(path=path, changed=False) module.exit_json(path=path, changed=False)
elif state == 'file': elif state == 'file':
if state != prev_state: if state != prev_state:
if follow and prev_state == 'link':
# follow symlink and operate on original
path = os.readlink(path)
prev_state = get_state(path)
file_args['path'] = path
if prev_state not in ['file','hard']:
# file is not absent and any other state is a conflict # file is not absent and any other state is a conflict
module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state)) module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state))
@ -218,6 +234,11 @@ def main():
module.exit_json(path=path, changed=changed) module.exit_json(path=path, changed=changed)
elif state == 'directory': elif state == 'directory':
if follow and prev_state == 'link':
path = os.readlink(path)
prev_state = get_state(path)
if prev_state == 'absent': if prev_state == 'absent':
if module.check_mode: if module.check_mode:
module.exit_json(changed=True) module.exit_json(changed=True)
@ -238,6 +259,10 @@ def main():
tmp_file_args['path']=curpath tmp_file_args['path']=curpath
changed = module.set_fs_attributes_if_different(tmp_file_args, changed) changed = module.set_fs_attributes_if_different(tmp_file_args, changed)
# We already know prev_state is not 'absent', therefore it exists in some form.
elif prev_state != 'directory':
module.fail_json(path=path, msg='%s already exists as a %s' % (path, prev_state))
changed = module.set_fs_attributes_if_different(file_args, changed) changed = module.set_fs_attributes_if_different(file_args, changed)
if recurse: if recurse:
@ -330,13 +355,13 @@ def main():
open(path, 'w').close() open(path, 'w').close()
except OSError, e: except OSError, e:
module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e)) module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e))
elif prev_state in ['file', 'directory']: elif prev_state in ['file', 'directory', 'hard']:
try: try:
os.utime(path, None) os.utime(path, None)
except OSError, e: except OSError, e:
module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e)) module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e))
else: else:
module.fail_json(msg='Cannot touch other than files and directories') module.fail_json(msg='Cannot touch other than files, directories, and hardlinks (%s is %s)' % (path, prev_state))
try: try:
module.set_fs_attributes_if_different(file_args, True) module.set_fs_attributes_if_different(file_args, True)
except SystemExit, e: except SystemExit, e:

View file

@ -23,6 +23,7 @@ DOCUMENTATION = '''
--- ---
module: ini_file module: ini_file
short_description: Tweak settings in INI files short_description: Tweak settings in INI files
extends_documentation_fragment: files
description: description:
- Manage (add, remove, change) individual settings in an INI-style file without having - Manage (add, remove, change) individual settings in an INI-style file without having
to manage the file as a whole with, say, M(template) or M(assemble). Adds missing to manage the file as a whole with, say, M(template) or M(assemble). Adds missing

View file

@ -28,12 +28,15 @@ DOCUMENTATION = """
--- ---
module: lineinfile module: lineinfile
author: Daniel Hokka Zakrisson, Ahti Kitsik author: Daniel Hokka Zakrisson, Ahti Kitsik
extends_documentation_fragment: files
short_description: Ensure a particular line is in a file, or replace an short_description: Ensure a particular line is in a file, or replace an
existing line using a back-referenced regular expression. existing line using a back-referenced regular expression.
description: description:
- This module will search a file for a line, and ensure that it is present or absent. - This module will search a file for a line, and ensure that it is present or absent.
- This is primarily useful when you want to change a single line in a - This is primarily useful when you want to change a single line in
file only. For other cases, see the M(copy) or M(template) modules. a file only. See the M(replace) module if you want to change
multiple, similar lines; for other cases, see the M(copy) or
M(template) modules.
version_added: "0.7" version_added: "0.7"
options: options:
dest: dest:
@ -127,7 +130,7 @@ options:
""" """
EXAMPLES = r""" EXAMPLES = r"""
- lineinfile: dest=/etc/selinux/config regexp=^SELINUX= line=SELINUX=disabled - lineinfile: dest=/etc/selinux/config regexp=^SELINUX= line=SELINUX=enforcing
- lineinfile: dest=/etc/sudoers state=absent regexp="^%wheel" - lineinfile: dest=/etc/sudoers state=absent regexp="^%wheel"
@ -145,7 +148,7 @@ EXAMPLES = r"""
- lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\1Xms${xms}m\3' backrefs=yes - lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\1Xms${xms}m\3' backrefs=yes
# Validate a the sudoers file before saving # Validate the sudoers file before saving
- lineinfile: dest=/etc/sudoers state=present regexp='^%ADMIN ALL\=' line='%ADMIN ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s' - lineinfile: dest=/etc/sudoers state=present regexp='^%ADMIN ALL\=' line='%ADMIN ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s'
""" """
@ -189,7 +192,7 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
if not create: if not create:
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest) module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
destpath = os.path.dirname(dest) destpath = os.path.dirname(dest)
if not os.path.exists(destpath): if not os.path.exists(destpath) and not module.check_mode:
os.makedirs(destpath) os.makedirs(destpath)
lines = [] lines = []
else: else:
@ -279,6 +282,9 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
backupdest = module.backup_local(dest) backupdest = module.backup_local(dest)
write_changes(module, lines, dest) write_changes(module, lines, dest)
if module.check_mode and not os.path.exists(dest):
module.exit_json(changed=changed, msg=msg, backup=backupdest)
msg, changed = check_file_attrs(module, changed, msg) msg, changed = check_file_attrs(module, changed, msg)
module.exit_json(changed=changed, msg=msg, backup=backupdest) module.exit_json(changed=changed, msg=msg, backup=backupdest)

View file

@ -26,6 +26,7 @@ DOCUMENTATION = """
--- ---
module: replace module: replace
author: Evan Kaufman author: Evan Kaufman
extends_documentation_fragment: files
short_description: Replace all instances of a particular string in a short_description: Replace all instances of a particular string in a
file using a back-referenced regular expression. file using a back-referenced regular expression.
description: description:

View file

@ -36,10 +36,17 @@ options:
aliases: [] aliases: []
get_md5: get_md5:
description: description:
- Whether to return the md5 sum of the file - Whether to return the md5 sum of the file. Will return None if we're unable to use md5 (Common for FIPS-140 compliant systems)
required: false required: false
default: yes default: yes
aliases: [] aliases: []
get_checksum:
description:
- Whether to return a checksum of the file (currently sha1)
required: false
default: yes
aliases: []
version_added: "1.8"
author: Bruce Pennypacker author: Bruce Pennypacker
''' '''
@ -51,12 +58,12 @@ EXAMPLES = '''
- fail: msg="Whoops! file ownership has changed" - fail: msg="Whoops! file ownership has changed"
when: st.stat.pw_name != 'root' when: st.stat.pw_name != 'root'
# Determine if a path exists and is a directory. Note we need to test # Determine if a path exists and is a directory. Note that we need to test
# both that p.stat.isdir actually exists, and also that it's set to true. # both that p.stat.isdir actually exists, and also that it's set to true.
- stat: path=/path/to/something - stat: path=/path/to/something
register: p register: p
- debug: msg="Path exists and is a directory" - debug: msg="Path exists and is a directory"
when: p.stat.isdir is defined and p.stat.isdir == true when: p.stat.isdir is defined and p.stat.isdir
# Don't do md5 checksum # Don't do md5 checksum
- stat: path=/path/to/myhugefile get_md5=no - stat: path=/path/to/myhugefile get_md5=no
@ -66,13 +73,15 @@ import os
import sys import sys
from stat import * from stat import *
import pwd import pwd
import grp
def main(): def main():
module = AnsibleModule( module = AnsibleModule(
argument_spec = dict( argument_spec = dict(
path = dict(required=True), path = dict(required=True),
follow = dict(default='no', type='bool'), follow = dict(default='no', type='bool'),
get_md5 = dict(default='yes', type='bool') get_md5 = dict(default='yes', type='bool'),
get_checksum = dict(default='yes', type='bool')
), ),
supports_check_mode = True supports_check_mode = True
) )
@ -81,6 +90,7 @@ def main():
path = os.path.expanduser(path) path = os.path.expanduser(path)
follow = module.params.get('follow') follow = module.params.get('follow')
get_md5 = module.params.get('get_md5') get_md5 = module.params.get('get_md5')
get_checksum = module.params.get('get_checksum')
try: try:
if follow: if follow:
@ -99,6 +109,7 @@ def main():
# back to ansible # back to ansible
d = { d = {
'exists' : True, 'exists' : True,
'path' : path,
'mode' : "%04o" % S_IMODE(mode), 'mode' : "%04o" % S_IMODE(mode),
'isdir' : S_ISDIR(mode), 'isdir' : S_ISDIR(mode),
'ischr' : S_ISCHR(mode), 'ischr' : S_ISCHR(mode),
@ -133,13 +144,23 @@ def main():
d['lnk_source'] = os.path.realpath(path) d['lnk_source'] = os.path.realpath(path)
if S_ISREG(mode) and get_md5 and os.access(path,os.R_OK): if S_ISREG(mode) and get_md5 and os.access(path,os.R_OK):
# Will fail on FIPS-140 compliant systems
try:
d['md5'] = module.md5(path) d['md5'] = module.md5(path)
except ValueError:
d['md5'] = None
if S_ISREG(mode) and get_checksum and os.access(path,os.R_OK):
d['checksum'] = module.sha1(path)
try: try:
pw = pwd.getpwuid(st.st_uid) pw = pwd.getpwuid(st.st_uid)
d['pw_name'] = pw.pw_name d['pw_name'] = pw.pw_name
grp_info = grp.getgrgid(pw.pw_gid)
d['gr_name'] = grp_info.gr_name
except: except:
pass pass

View file

@ -39,7 +39,7 @@ options:
version_added: "1.5" version_added: "1.5"
mode: mode:
description: description:
- Specify the direction of the synchroniztion. In push mode the localhost or delegate is the source; In pull mode the remote host in context is the source. - Specify the direction of the synchronization. In push mode the localhost or delegate is the source; In pull mode the remote host in context is the source.
required: false required: false
choices: [ 'push', 'pull' ] choices: [ 'push', 'pull' ]
default: 'push' default: 'push'
@ -145,6 +145,7 @@ options:
required: false required: false
version_added: "1.6" version_added: "1.6"
notes: notes:
- rsync must be installed on both the local and remote machine.
- Inspect the verbose output to validate the destination user/host/path - Inspect the verbose output to validate the destination user/host/path
are what was expected. are what was expected.
- The remote user for the dest path will always be the remote_user, not - The remote user for the dest path will always be the remote_user, not
@ -180,7 +181,9 @@ local_action: synchronize src=some/relative/path dest=/some/absolute/path
pull mode pull mode
synchronize: mode=pull src=some/relative/path dest=/some/absolute/path synchronize: mode=pull src=some/relative/path dest=/some/absolute/path
# Synchronization of src on delegate host to dest on the current inventory host # Synchronization of src on delegate host to dest on the current inventory host.
# If delegate_to is set to the current inventory host, this can be used to syncronize
# two directories on that host.
synchronize: > synchronize: >
src=some/relative/path dest=/some/absolute/path src=some/relative/path dest=/some/absolute/path
delegate_to: delegate.host delegate_to: delegate.host

View file

@ -24,6 +24,7 @@ DOCUMENTATION = '''
module: unarchive module: unarchive
version_added: 1.4 version_added: 1.4
short_description: Copies an archive to a remote location and unpack it short_description: Copies an archive to a remote location and unpack it
extends_documentation_fragment: files
description: description:
- The M(unarchive) module copies an archive file from the local machine to a remote and unpacks it. - The M(unarchive) module copies an archive file from the local machine to a remote and unpacks it.
options: options:
@ -75,18 +76,35 @@ EXAMPLES = '''
''' '''
import os import os
from zipfile import ZipFile
class UnarchiveError(Exception):
pass
# class to handle .zip files # class to handle .zip files
class ZipFile(object): class ZipArchive(object):
def __init__(self, src, dest, module): def __init__(self, src, dest, module):
self.src = src self.src = src
self.dest = dest self.dest = dest
self.module = module self.module = module
self.cmd_path = self.module.get_bin_path('unzip') self.cmd_path = self.module.get_bin_path('unzip')
self._files_in_archive = []
def is_unarchived(self): @property
def files_in_archive(self, force_refresh=False):
if self._files_in_archive and not force_refresh:
return self._files_in_archive
archive = ZipFile(self.src)
try:
self._files_in_archive = archive.namelist()
except:
raise UnarchiveError('Unable to list files in the archive')
return self._files_in_archive
def is_unarchived(self, mode, owner, group):
return dict(unarchived=False) return dict(unarchived=False)
def unarchive(self): def unarchive(self):
@ -105,7 +123,7 @@ class ZipFile(object):
# class to handle gzipped tar files # class to handle gzipped tar files
class TgzFile(object): class TgzArchive(object):
def __init__(self, src, dest, module): def __init__(self, src, dest, module):
self.src = src self.src = src
@ -113,11 +131,49 @@ class TgzFile(object):
self.module = module self.module = module
self.cmd_path = self.module.get_bin_path('tar') self.cmd_path = self.module.get_bin_path('tar')
self.zipflag = 'z' self.zipflag = 'z'
self._files_in_archive = []
def is_unarchived(self): @property
cmd = '%s -v -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src) def files_in_archive(self, force_refresh=False):
if self._files_in_archive and not force_refresh:
return self._files_in_archive
cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src)
rc, out, err = self.module.run_command(cmd)
if rc != 0:
raise UnarchiveError('Unable to list files in the archive')
for filename in out.splitlines():
if filename:
self._files_in_archive.append(filename)
return self._files_in_archive
def is_unarchived(self, mode, owner, group):
cmd = '%s -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src)
rc, out, err = self.module.run_command(cmd) rc, out, err = self.module.run_command(cmd)
unarchived = (rc == 0) unarchived = (rc == 0)
if not unarchived:
# Check whether the differences are in something that we're
# setting anyway
# What will be set
to_be_set = set()
for perm in (('Mode', mode), ('Gid', group), ('Uid', owner)):
if perm[1] is not None:
to_be_set.add(perm[0])
# What is different
changes = set()
difference_re = re.compile(r': (.*) differs$')
for line in out.splitlines():
match = difference_re.search(line)
if not match:
# Unknown tar output. Assume we have changes
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
changes.add(match.groups()[0])
if changes and changes.issubset(to_be_set):
unarchived = True
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd) return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
def unarchive(self): def unarchive(self):
@ -128,47 +184,41 @@ class TgzFile(object):
def can_handle_archive(self): def can_handle_archive(self):
if not self.cmd_path: if not self.cmd_path:
return False return False
cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src)
rc, out, err = self.module.run_command(cmd) try:
if rc == 0: if self.files_in_archive:
if len(out.splitlines(True)) > 0:
return True return True
except UnarchiveError:
pass
# Errors and no files in archive assume that we weren't able to
# properly unarchive it
return False return False
# class to handle tar files that aren't compressed # class to handle tar files that aren't compressed
class TarFile(TgzFile): class TarArchive(TgzArchive):
def __init__(self, src, dest, module): def __init__(self, src, dest, module):
self.src = src super(TarArchive, self).__init__(src, dest, module)
self.dest = dest
self.module = module
self.cmd_path = self.module.get_bin_path('tar')
self.zipflag = '' self.zipflag = ''
# class to handle bzip2 compressed tar files # class to handle bzip2 compressed tar files
class TarBzip(TgzFile): class TarBzipArchive(TgzArchive):
def __init__(self, src, dest, module): def __init__(self, src, dest, module):
self.src = src super(TarBzipArchive, self).__init__(src, dest, module)
self.dest = dest
self.module = module
self.cmd_path = self.module.get_bin_path('tar')
self.zipflag = 'j' self.zipflag = 'j'
# class to handle xz compressed tar files # class to handle xz compressed tar files
class TarXz(TgzFile): class TarXzArchive(TgzArchive):
def __init__(self, src, dest, module): def __init__(self, src, dest, module):
self.src = src super(TarXzArchive, self).__init__(src, dest, module)
self.dest = dest
self.module = module
self.cmd_path = self.module.get_bin_path('tar')
self.zipflag = 'J' self.zipflag = 'J'
# try handlers in order and return the one that works or bail if none work # try handlers in order and return the one that works or bail if none work
def pick_handler(src, dest, module): def pick_handler(src, dest, module):
handlers = [TgzFile, ZipFile, TarFile, TarBzip, TarXz] handlers = [TgzArchive, ZipArchive, TarArchive, TarBzipArchive, TarXzArchive]
for handler in handlers: for handler in handlers:
obj = handler(src, dest, module) obj = handler(src, dest, module)
if obj.can_handle_archive(): if obj.can_handle_archive():
@ -192,7 +242,7 @@ def main():
src = os.path.expanduser(module.params['src']) src = os.path.expanduser(module.params['src'])
dest = os.path.expanduser(module.params['dest']) dest = os.path.expanduser(module.params['dest'])
copy = module.params['copy'] copy = module.params['copy']
creates = module.params['creates'] file_args = module.load_file_common_arguments(module.params)
# did tar file arrive? # did tar file arrive?
if not os.path.exists(src): if not os.path.exists(src):
@ -203,20 +253,6 @@ def main():
if not os.access(src, os.R_OK): if not os.access(src, os.R_OK):
module.fail_json(msg="Source '%s' not readable" % src) module.fail_json(msg="Source '%s' not readable" % src)
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
v = os.path.expanduser(creates)
if os.path.exists(v):
module.exit_json(
stdout="skipped, since %s exists" % v,
skipped=True,
changed=False,
stderr=False,
rc=0
)
# is dest OK to receive tar file? # is dest OK to receive tar file?
if not os.path.isdir(dest): if not os.path.isdir(dest):
module.fail_json(msg="Destination '%s' is not a directory" % dest) module.fail_json(msg="Destination '%s' is not a directory" % dest)
@ -228,11 +264,11 @@ def main():
res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src) res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)
# do we need to do unpack? # do we need to do unpack?
res_args['check_results'] = handler.is_unarchived() res_args['check_results'] = handler.is_unarchived(file_args['mode'],
file_args['owner'], file_args['group'])
if res_args['check_results']['unarchived']: if res_args['check_results']['unarchived']:
res_args['changed'] = False res_args['changed'] = False
module.exit_json(**res_args) else:
# do the unpack # do the unpack
try: try:
res_args['extract_results'] = handler.unarchive() res_args['extract_results'] = handler.unarchive()
@ -240,11 +276,17 @@ def main():
module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args) module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
except IOError: except IOError:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest)) module.fail_json(msg="failed to unpack %s to %s" % (src, dest))
else:
res_args['changed'] = True res_args['changed'] = True
# do we need to change perms?
for filename in handler.files_in_archive:
file_args['path'] = os.path.join(dest, filename)
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])
module.exit_json(**res_args) module.exit_json(**res_args)
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
main() if __name__ == '__main__':
main()

View file

View file

@ -154,7 +154,7 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10):
if info['status'] == 304: if info['status'] == 304:
module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', '')) module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''))
# create a temporary file and copy content to do md5-based replacement # create a temporary file and copy content to do checksum-based replacement
if info['status'] != 200: if info['status'] != 200:
module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest) module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest)
@ -241,8 +241,8 @@ def main():
filename = url_filename(info['url']) filename = url_filename(info['url'])
dest = os.path.join(dest, filename) dest = os.path.join(dest, filename)
md5sum_src = None checksum_src = None
md5sum_dest = None checksum_dest = None
# raise an error if there is no tmpsrc file # raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc): if not os.path.exists(tmpsrc):
@ -251,7 +251,7 @@ def main():
if not os.access(tmpsrc, os.R_OK): if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc) os.remove(tmpsrc)
module.fail_json( msg="Source %s not readable" % (tmpsrc)) module.fail_json( msg="Source %s not readable" % (tmpsrc))
md5sum_src = module.md5(tmpsrc) checksum_src = module.sha1(tmpsrc)
# check if there is no dest file # check if there is no dest file
if os.path.exists(dest): if os.path.exists(dest):
@ -262,13 +262,13 @@ def main():
if not os.access(dest, os.R_OK): if not os.access(dest, os.R_OK):
os.remove(tmpsrc) os.remove(tmpsrc)
module.fail_json( msg="Destination %s not readable" % (dest)) module.fail_json( msg="Destination %s not readable" % (dest))
md5sum_dest = module.md5(dest) checksum_dest = module.sha1(dest)
else: else:
if not os.access(os.path.dirname(dest), os.W_OK): if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc) os.remove(tmpsrc)
module.fail_json( msg="Destination %s not writable" % (os.path.dirname(dest))) module.fail_json( msg="Destination %s not writable" % (os.path.dirname(dest)))
if md5sum_src != md5sum_dest: if checksum_src != checksum_dest:
try: try:
shutil.copyfile(tmpsrc, dest) shutil.copyfile(tmpsrc, dest)
except Exception, err: except Exception, err:
@ -303,8 +303,15 @@ def main():
file_args['path'] = dest file_args['path'] = dest
changed = module.set_fs_attributes_if_different(file_args, changed) changed = module.set_fs_attributes_if_different(file_args, changed)
# Backwards compat only. We'll return None on FIPS enabled systems
try:
md5sum = module.md5(dest)
except ValueError:
md5sum = None
# Mission complete # Mission complete
module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum_src,
module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum, checksum=checksum_src,
sha256sum=sha256sum, changed=changed, msg=info.get('msg', '')) sha256sum=sha256sum, changed=changed, msg=info.get('msg', ''))
# import module snippets # import module snippets

View file

@ -194,8 +194,8 @@ def write_file(module, url, dest, content):
module.fail_json(msg="failed to create temporary content file: %s" % str(err)) module.fail_json(msg="failed to create temporary content file: %s" % str(err))
f.close() f.close()
md5sum_src = None checksum_src = None
md5sum_dest = None checksum_dest = None
# raise an error if there is no tmpsrc file # raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc): if not os.path.exists(tmpsrc):
@ -204,7 +204,7 @@ def write_file(module, url, dest, content):
if not os.access(tmpsrc, os.R_OK): if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc) os.remove(tmpsrc)
module.fail_json( msg="Source %s not readable" % (tmpsrc)) module.fail_json( msg="Source %s not readable" % (tmpsrc))
md5sum_src = module.md5(tmpsrc) checksum_src = module.sha1(tmpsrc)
# check if there is no dest file # check if there is no dest file
if os.path.exists(dest): if os.path.exists(dest):
@ -215,13 +215,13 @@ def write_file(module, url, dest, content):
if not os.access(dest, os.R_OK): if not os.access(dest, os.R_OK):
os.remove(tmpsrc) os.remove(tmpsrc)
module.fail_json( msg="Destination %s not readable" % (dest)) module.fail_json( msg="Destination %s not readable" % (dest))
md5sum_dest = module.md5(dest) checksum_dest = module.sha1(dest)
else: else:
if not os.access(os.path.dirname(dest), os.W_OK): if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc) os.remove(tmpsrc)
module.fail_json( msg="Destination dir %s not writable" % (os.path.dirname(dest))) module.fail_json( msg="Destination dir %s not writable" % (os.path.dirname(dest)))
if md5sum_src != md5sum_dest: if checksum_src != checksum_dest:
try: try:
shutil.copyfile(tmpsrc, dest) shutil.copyfile(tmpsrc, dest)
except Exception, err: except Exception, err:
@ -426,7 +426,8 @@ def main():
uresp[ukey] = value uresp[ukey] = value
if 'content_type' in uresp: if 'content_type' in uresp:
if uresp['content_type'].startswith('application/json'): if uresp['content_type'].startswith('application/json') or \
uresp['content_type'].startswith('text/json'):
try: try:
js = json.loads(content) js = json.loads(content)
uresp['json'] = js uresp['json'] = js

Some files were not shown because too many files have changed in this diff Show more