Merge branch 'devel' into openbsd_rcctl

This commit is contained in:
Jonathan Armani 2015-01-23 13:01:34 +01:00
commit 645e0653a3
141 changed files with 2444 additions and 1214 deletions


@ -50,7 +50,7 @@ options:
state:
description:
- If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated.
If state is absent, stack will be removed.
If state is "absent", stack will be removed.
required: true
default: null
aliases: []
@ -60,6 +60,13 @@ options:
required: true
default: null
aliases: []
stack_policy:
description:
- the path of the cloudformation stack policy
required: false
default: null
aliases: []
version_added: "x.x"
tags:
description:
- Dictionary of tags to associate with stack and its resources during stack creation. Cannot be updated later.
@ -97,18 +104,19 @@ EXAMPLES = '''
# Basic task example
tasks:
- name: launch ansible cloudformation example
action: cloudformation >
stack_name="ansible-cloudformation" state=present
region=us-east-1 disable_rollback=true
template=files/cloudformation-example.json
args:
cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "files/cloudformation-example.json"
template_parameters:
KeyName: jmartin
DiskType: ephemeral
InstanceType: m1.small
KeyName: "jmartin"
DiskType: "ephemeral"
InstanceType: "m1.small"
ClusterSize: 3
tags:
Stack: ansible-cloudformation
Stack: "ansible-cloudformation"
'''
import json
@ -122,13 +130,6 @@ except ImportError:
sys.exit(1)
class Region:
def __init__(self, region):
'''connects boto to the region specified in the cloudformation template'''
self.name = region
self.endpoint = 'cloudformation.%s.amazonaws.com' % region
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
@ -196,6 +197,7 @@ def main():
template_parameters=dict(required=False, type='dict', default={}),
state=dict(default='present', choices=['present', 'absent']),
template=dict(default=None, required=True),
stack_policy=dict(default=None, required=False),
disable_rollback=dict(default=False, type='bool'),
tags=dict(default=None)
)
@ -208,6 +210,10 @@ def main():
state = module.params['state']
stack_name = module.params['stack_name']
template_body = open(module.params['template'], 'r').read()
if module.params['stack_policy'] is not None:
stack_policy_body = open(module.params['stack_policy'], 'r').read()
else:
stack_policy_body = None
disable_rollback = module.params['disable_rollback']
template_parameters = module.params['template_parameters']
tags = module.params['tags']
@ -226,11 +232,10 @@ def main():
stack_outputs = {}
try:
cf_region = Region(region)
cfn = boto.cloudformation.connection.CloudFormationConnection(
aws_access_key_id=aws_access_key,
cfn = boto.cloudformation.connect_to_region(
region,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
region=cf_region,
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
@ -244,6 +249,7 @@ def main():
try:
cfn.create_stack(stack_name, parameters=template_parameters_tup,
template_body=template_body,
stack_policy_body=stack_policy_body,
disable_rollback=disable_rollback,
capabilities=['CAPABILITY_IAM'],
**kwargs)
@ -264,6 +270,7 @@ def main():
try:
cfn.update_stack(stack_name, parameters=template_parameters_tup,
template_body=template_body,
stack_policy_body=stack_policy_body,
disable_rollback=disable_rollback,
capabilities=['CAPABILITY_IAM'])
operation = 'UPDATE'
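Taken together, the new stack_policy option is just a path: the module reads the policy document from disk and forwards its text to boto, and the hand-rolled Region/endpoint class gives way to boto's own connect_to_region. A minimal standalone sketch of the same flow (the function name and defaults here are illustrative, not part of the diff, and the stack_policy_body keyword needs a boto release that supports it):

import boto.cloudformation

def create_stack_with_policy(region, stack_name, template_path, stack_policy_path=None):
    # read the template, and the optional policy document, from disk
    template_body = open(template_path, 'r').read()
    stack_policy_body = None
    if stack_policy_path is not None:
        stack_policy_body = open(stack_policy_path, 'r').read()
    # connect_to_region picks the endpoint itself; credentials come from
    # the environment or boto config in this sketch
    cfn = boto.cloudformation.connect_to_region(region)
    cfn.create_stack(stack_name,
                     template_body=template_body,
                     stack_policy_body=stack_policy_body,
                     disable_rollback=False,
                     capabilities=['CAPABILITY_IAM'])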


@ -17,9 +17,9 @@
DOCUMENTATION = '''
---
module: ec2
short_description: create, terminate, start or stop an instance in ec2, return instanceid
short_description: create, terminate, start or stop an instance in ec2
description:
- Creates or terminates ec2 instances. When created optionally waits for it to be 'running'. This module has a dependency on python-boto >= 2.5
- Creates or terminates ec2 instances.
version_added: "0.9"
options:
key_name:
@ -28,12 +28,6 @@ options:
required: false
default: null
aliases: ['keypair']
id:
description:
- identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
required: false
default: null
aliases: []
group:
description:
- security group (or list of groups) to use with the instance
@ -67,6 +61,13 @@ options:
required: true
default: null
aliases: []
tenancy:
version_added: "1.9"
description:
- An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are "default" or "dedicated". Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
required: false
default: default
aliases: []
spot_price:
version_added: "1.5"
description:
@ -76,7 +77,7 @@ options:
aliases: []
image:
description:
- I(emi) (or I(ami)) to use for the instance
- I(ami) ID to use for the instance
required: true
default: null
aliases: []
@ -94,7 +95,7 @@ options:
aliases: []
wait:
description:
- wait for the instance to be in state 'running' before returning
- wait for the instance to be 'running' before returning. Does not wait for SSH, see 'wait_for' example for details.
required: false
default: "no"
choices: [ "yes", "no" ]
@ -226,54 +227,55 @@ extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic provisioning example
- local_action:
module: ec2
- ec2:
key_name: mykey
instance_type: c1.medium
image: emi-40603AD1
instance_type: t2.micro
image: ami-123456
wait: yes
group: webserver
count: 3
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Advanced example with tagging and CloudWatch
- local_action:
module: ec2
- ec2:
key_name: mykey
group: databases
instance_type: m1.large
image: ami-6e649707
instance_type: t2.micro
image: ami-123456
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Single instance with additional IOPS volume from snapshot and volume delete on termination
local_action:
module: ec2
- ec2:
key_name: mykey
group: webserver
instance_type: m1.large
image: ami-6e649707
instance_type: c3.medium
image: ami-123456
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
device_type: io1
iops: 1000
volume_size: 100
delete_on_termination: true
- device_name: /dev/sdb
snapshot: snap-abcdef12
device_type: io1
iops: 1000
volume_size: 100
delete_on_termination: true
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Multiple groups example
local_action:
module: ec2
- ec2:
key_name: mykey
group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
instance_type: m1.large
@ -284,10 +286,11 @@ local_action:
instance_tags:
db: postgres
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Multiple instances with additional volume from snapshot
local_action:
module: ec2
- ec2:
key_name: mykey
group: webserver
instance_type: m1.large
@ -300,21 +303,23 @@ local_action:
snapshot: snap-abcdef12
volume_size: 10
monitoring: yes
# VPC example
- local_action:
module: ec2
key_name: mykey
group_id: sg-1dc53f72
instance_type: m1.small
image: ami-6e649707
wait: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Spot instance example
# Dedicated tenancy example
- local_action:
module: ec2
assign_public_ip: yes
group_id: sg-1dc53f72
key_name: mykey
image: ami-6e649707
instance_type: m1.small
tenancy: dedicated
vpc_subnet_id: subnet-29e63245
wait: yes
# Spot instance example
- ec2:
spot_price: 0.24
spot_wait_timeout: 600
keypair: mykey
@ -328,7 +333,6 @@ local_action:
# Launch instances, runs some tasks
# and then terminate them
- name: Create a sandbox instance
hosts: localhost
gather_facts: False
@ -340,13 +344,21 @@ local_action:
region: us-east-1
tasks:
- name: Launch instance
local_action: ec2 key_name={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image }} wait=true region={{ region }}
ec2:
key_name: "{{ keypair }}"
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
image: "{{ image }}"
wait: true
region: "{{ region }}"
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
register: ec2
- name: Add new instance to host group
local_action: add_host hostname={{ item.public_ip }} groupname=launched
add_host: hostname={{ item.public_ip }} groupname=launched
with_items: ec2.instances
- name: Wait for SSH to come up
local_action: wait_for host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started
wait_for: host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started
with_items: ec2.instances
- name: Configure instance(s)
@ -362,8 +374,7 @@ local_action:
connection: local
tasks:
- name: Terminate instances that were previously launched
local_action:
module: ec2
ec2:
state: 'absent'
instance_ids: '{{ ec2.instance_ids }}'
@ -382,12 +393,13 @@ local_action:
region: us-east-1
tasks:
- name: Start the sandbox instances
local_action:
module: ec2
ec2:
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: running
wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
role:
- do_neat_stuff
- do_more_neat_stuff
@ -403,39 +415,41 @@ local_action:
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Stop the sanbox instances
local_action:
module: ec2
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: stopped
wait: True
- name: Stop the sandbox instances
ec2:
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: stopped
wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# Enforce that 5 instances with a tag "foo" are running
# (Highly recommended!)
#
- local_action:
module: ec2
- ec2:
key_name: mykey
instance_type: c1.medium
image: emi-40603AD1
image: ami-40603AD1
wait: yes
group: webserver
instance_tags:
foo: bar
exact_count: 5
count_tag: foo
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# Enforce that 5 running instances named "database" with a "dbtype" of "postgres"
#
- local_action:
module: ec2
- ec2:
key_name: mykey
instance_type: c1.medium
image: emi-40603AD1
image: ami-40603AD1
wait: yes
group: webserver
instance_tags:
@ -445,6 +459,8 @@ local_action:
count_tag:
Name: database
dbtype: postgres
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# count_tag complex argument examples
@ -501,7 +517,7 @@ def _set_none_to_blank(dictionary):
result = dictionary
for k in result.iterkeys():
if type(result[k]) == dict:
result[k] = _set_non_to_blank(result[k])
result[k] = _set_none_to_blank(result[k])
elif not result[k]:
result[k] = ""
return result
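Before this one-character fix the recursive call raised a NameError (_set_non_to_blank was never defined), so nested tag dictionaries were not cleaned. A small self-contained restatement of the fixed behavior, for illustration:

def _set_none_to_blank(dictionary):
    # recursively replace falsy values (None, '', 0) with an empty string
    for k in dictionary:
        if isinstance(dictionary[k], dict):
            dictionary[k] = _set_none_to_blank(dictionary[k])
        elif not dictionary[k]:
            dictionary[k] = ""
    return dictionary

print(_set_none_to_blank({'Name': 'database', 'dbtype': None, 'meta': {'env': None}}))
# -> {'Name': 'database', 'dbtype': '', 'meta': {'env': ''}}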
@ -585,6 +601,11 @@ def get_instance_info(inst):
except AttributeError:
instance_info['ebs_optimized'] = False
try:
instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
except AttributeError:
instance_info['tenancy'] = 'default'
return instance_info
def boto_supports_associate_public_ip_address(ec2):
@ -660,6 +681,11 @@ def enforce_count(module, ec2):
count_tag = module.params.get('count_tag')
zone = module.params.get('zone')
# fail here if the exact count was specified without filtering
# on a tag, as this may lead to an undesired removal of instances
if exact_count and count_tag is None:
module.fail_json(msg="you must use the 'count_tag' option with exact_count")
reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag, zone)
changed = None
@ -723,6 +749,7 @@ def create_instances(module, ec2, override_count=None):
group_id = module.params.get('group_id')
zone = module.params.get('zone')
instance_type = module.params.get('instance_type')
tenancy = module.params.get('tenancy')
spot_price = module.params.get('spot_price')
image = module.params.get('image')
if override_count:
@ -806,6 +833,9 @@ def create_instances(module, ec2, override_count=None):
if ebs_optimized:
params['ebs_optimized'] = ebs_optimized
if tenancy:
params['tenancy'] = tenancy
if boto_supports_profile_name_arg(ec2):
params['instance_profile_name'] = instance_profile_name
@ -1148,6 +1178,7 @@ def main():
count_tag = dict(),
volumes = dict(type='list'),
ebs_optimized = dict(type='bool', default=False),
tenancy = dict(default='default'),
)
)


@ -18,9 +18,9 @@ DOCUMENTATION = '''
---
module: ec2_ami
version_added: "1.3"
short_description: create or destroy an image in ec2, return imageid
short_description: create or destroy an image in ec2
description:
- Creates or deletes ec2 images. This module has a dependency on python-boto >= 2.5
- Creates or deletes ec2 images.
options:
instance_id:
description:
@ -79,7 +79,7 @@ options:
aliases: []
delete_snapshot:
description:
- Whether or not to deleted an AMI while deregistering it.
- Whether or not to delete an AMI while deregistering it.
required: false
default: null
aliases: []
@ -89,13 +89,10 @@ extends_documentation_fragment: aws
'''
# Thank you to iAcquire for sponsoring development of this module.
#
# See http://alestic.com/2011/06/ec2-ami-security for more information about ensuring the security of your AMI.
EXAMPLES = '''
# Basic AMI Creation
- local_action:
module: ec2_ami
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
@ -104,8 +101,7 @@ EXAMPLES = '''
register: instance
# Basic AMI Creation, without waiting
- local_action:
module: ec2_ami
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
@ -115,22 +111,20 @@ EXAMPLES = '''
register: instance
# Deregister/Delete AMI
- local_action:
module: ec2_ami
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: ${instance.image_id}
image_id: "{{ instance.image_id }}"
delete_snapshot: True
state: absent
# Deregister AMI
- local_action:
module: ec2_ami
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: ${instance.image_id}
image_id: "{{ instance.image_id }}"
delete_snapshot: False
state: absent


@ -16,10 +16,11 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_ami_search
short_description: Retrieve AWS AMI for a given operating system.
short_description: Retrieve AWS AMI information for a given operating system.
version_added: "1.6"
description:
- Look up the most recent AMI on AWS for a given operating system.
@ -56,7 +57,8 @@ options:
required: false
default: us-east-1
choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2",
"eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2", "us-gov-west-1"]
"eu-central-1", "eu-west-1", "sa-east-1", "us-east-1",
"us-west-1", "us-west-2", "us-gov-west-1"]
virt:
description: virtualization type
required: false
@ -88,11 +90,13 @@ SUPPORTED_DISTROS = ['ubuntu']
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-central-1',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
'us-west-2',
"us-gov-west-1"]
def get_url(module, url):

cloud/ec2_asg.py → cloud/amazon/ec2_asg.py Executable file → Normal file

@ -119,21 +119,23 @@ extends_documentation_fragment: aws
"""
EXAMPLES = '''
A basic example of configuration:
# Basic configuration
- ec2_asg:
name: special
load_balancers: 'lb1,lb2'
availability_zones: 'eu-west-1a,eu-west-1b'
load_balancers: [ 'lb1', 'lb2' ]
availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
launch_config_name: 'lc-1'
min_size: 1
max_size: 10
desired_capacity: 5
vpc_zone_identifier: 'subnet-abcd1234,subnet-1a2b3c4d'
vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
tags:
- environment: production
propagate_at_launch: no
# Rolling ASG Updates
Below is an example of how to assign a new launch config to an ASG and terminate old instances.
All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
@ -199,7 +201,7 @@ except ImportError:
ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity',
'health_check_period', 'health_check_type', 'launch_config_name',
'load_balancers', 'max_size', 'min_size', 'name', 'placement_group',
'tags', 'termination_policies', 'vpc_zone_identifier')
'termination_policies', 'vpc_zone_identifier')
INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
@ -245,6 +247,10 @@ def get_properties(autoscaling_group):
properties['pending_instances'] += 1
properties['instance_facts'] = instance_facts
properties['load_balancers'] = autoscaling_group.load_balancers
if getattr(autoscaling_group, "tags", None):
properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags)
return properties
@ -268,8 +274,10 @@ def create_autoscaling_group(connection, module):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
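# (illustrative note, not in the diff) StandardError, a Python 2 base class,
# is caught as well so any boto setup failure becomes a clean module failure
# message instead of a traceback; the same widening recurs in the other
# modules touched by this commit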
elif vpc_zone_identifier:
vpc_zone_identifier = ','.join(vpc_zone_identifier)
asg_tags = []
for tag in set_tags:
@ -318,6 +326,8 @@ def create_autoscaling_group(connection, module):
for attr in ASG_ATTRIBUTES:
if module.params.get(attr):
module_attr = module.params.get(attr)
if attr == 'vpc_zone_identifier':
module_attr = ','.join(module_attr)
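# (illustrative note, not in the diff) the join turns the module's list form
# into the comma-separated string AWS reports for the group, e.g.
# ','.join(['subnet-abcd1234', 'subnet-1a2b3c4d']) -> 'subnet-abcd1234,subnet-1a2b3c4d'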
group_attr = getattr(as_group, attr)
# we do this because AWS and the module may return the same list
# sorted differently
@ -357,6 +367,7 @@ def create_autoscaling_group(connection, module):
continue
if changed:
connection.create_or_update_tags(asg_tags)
as_group.tags = asg_tags
# handle loadbalancers separately because None != []
load_balancers = module.params.get('load_balancers') or []
@ -373,26 +384,6 @@ def create_autoscaling_group(connection, module):
module.fail_json(msg=str(e))
result = as_groups[0]
module.exit_json(changed=changed, name=result.name,
autoscaling_group_arn=result.autoscaling_group_arn,
availability_zones=result.availability_zones,
created_time=str(result.created_time),
default_cooldown=result.default_cooldown,
health_check_period=result.health_check_period,
health_check_type=result.health_check_type,
instance_id=result.instance_id,
instances=[instance.instance_id for instance in result.instances],
launch_config_name=result.launch_config_name,
load_balancers=result.load_balancers,
min_size=result.min_size, max_size=result.max_size,
placement_group=result.placement_group,
wait_timeout = dict(default=300),
tags=result.tags,
termination_policies=result.termination_policies,
vpc_zone_identifier=result.vpc_zone_identifier)
def delete_autoscaling_group(connection, module):
group_name = module.params.get('name')
groups = connection.get_all_groups(names=[group_name])
@ -426,13 +417,14 @@ def replace(connection, module):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('group_name')
group_name = module.params.get('name')
max_size = module.params.get('max_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
# FIXME: we need some more docs about this feature
replace_instances = module.params.get('replace_instances')
# wait for instance list to be populated on a newly provisioned ASG
instance_wait = time.time() + 30
while instance_wait > time.time():
@ -444,7 +436,7 @@ def replace(connection, module):
time.sleep(10)
if instance_wait <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too for instances to appear. %s" % time.asctime())
module.fail_json(msg = "Waited too long for instances to appear. %s" % time.asctime())
# determine if we need to continue
replaceable = 0
if replace_instances:
@ -470,7 +462,7 @@ def replace(connection, module):
props = get_properties(as_group)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too for instances to appear. %s" % time.asctime())
module.fail_json(msg = "Waited too long for instances to appear. %s" % time.asctime())
instances = props['instances']
if replace_instances:
instances = replace_instances
@ -490,7 +482,7 @@ def replace(connection, module):
def replace_batch(connection, module, replace_instances):
group_name = module.params.get('group_name')
group_name = module.params.get('name')
wait_timeout = int(module.params.get('wait_timeout'))
lc_check = module.params.get('lc_check')
@ -567,7 +559,7 @@ def main():
min_size=dict(type='int'),
max_size=dict(type='int'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='str'),
vpc_zone_identifier=dict(type='list'),
replace_batch_size=dict(type='int', default=1),
replace_all_instances=dict(type='bool', default=False),
replace_instances=dict(type='list', default=[]),
@ -577,9 +569,13 @@ def main():
tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
)
),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [['replace_all_instances', 'replace_instances']]
)
module = AnsibleModule(argument_spec=argument_spec)
state = module.params.get('state')
replace_instances = module.params.get('replace_instances')
@ -591,16 +587,16 @@ def main():
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
changed = False
if replace_all_instances and replace_instances:
module.fail_json(msg="You can't use replace_instances and replace_all_instances in the same task.")
changed = create_changed = replace_changed = False
if state == 'present':
create_changed, asg_properties=create_autoscaling_group(connection, module)
if replace_all_instances or replace_instances:
replace_changed, asg_properties=replace(connection, module)
elif state == 'absent':
changed = delete_autoscaling_group(connection, module)
module.exit_json( changed = changed )
if replace_all_instances or replace_instances:
replace_changed, asg_properties=replace(connection, module)
if create_changed or replace_changed:
changed = True
module.exit_json( changed = changed, **asg_properties )


@ -69,13 +69,13 @@ EXAMPLES = '''
ec2_eip: instance_id=i-1212f003
- name: allocate a new elastic IP without associating it to anything
ec2_eip:
action: ec2_eip
register: eip
- name: output the IP
debug: msg="Allocated IP is {{ eip.public_ip }}"
- name: provision new instances with ec2
ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes group=webserver count=3
ec2: keypair=mykey instance_type=c1.medium image=ami-40603AD1 wait=yes group=webserver count=3
register: ec2
- name: associate new elastic IPs with each of the instances
ec2_eip: "instance_id={{ item }}"


@ -80,18 +80,18 @@ EXAMPLES = """
# basic pre_task and post_task example
pre_tasks:
- name: Gathering ec2 facts
ec2_facts:
action: ec2_facts
- name: Instance De-register
local_action: ec2_elb
args:
local_action:
module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}"
state: 'absent'
roles:
- myrole
post_tasks:
- name: Instance Register
local_action: ec2_elb
args:
local_action:
module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
state: 'present'
@ -258,7 +258,7 @@ class ElbManager:
try:
elb = connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
except (boto.exception.NoAuthHandlerFound, StandardError), e:
self.module.fail_json(msg=str(e))
elbs = elb.get_all_load_balancers()
@ -278,7 +278,7 @@ class ElbManager:
try:
ec2 = connect_to_aws(boto.ec2, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
except (boto.exception.NoAuthHandlerFound, StandardError), e:
self.module.fail_json(msg=str(e))
return ec2.get_only_instances(instance_ids=[self.instance_id])[0]


@ -115,7 +115,8 @@ EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example
# Basic provisioning example (non-VPC)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
@ -134,8 +135,8 @@ EXAMPLES = """
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Internal ELB example
# Basic VPC provisioning example
- local_action:
module: ec2_elb_lb
name: "test-vpc"
@ -214,7 +215,7 @@ EXAMPLES = """
name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456, subnet-67890'
subnets: 'subnet-123456,subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
@ -374,7 +375,7 @@ class ElbManager(object):
try:
return connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
except (boto.exception.NoAuthHandlerFound, StandardError), e:
self.module.fail_json(msg=str(e))
def _delete_elb(self):


@ -34,8 +34,6 @@ description:
- This module fetches data from the metadata servers in ec2 (aws) as per
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html.
The module must be called from within the EC2 instance itself.
Eucalyptus cloud provides a similar service and this module should
work with this cloud provider as well.
notes:
- Parameters to filter on ec2_facts may be added later.
author: "Silviu Dicu <silviudicu@gmail.com>"
@ -65,6 +63,7 @@ class Ec2Metadata(object):
AWS_REGIONS = ('ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-central-1',
'eu-west-1',
'sa-east-1',
'us-east-1',


@ -55,7 +55,7 @@ options:
purge_rules_egress:
version_added: "1.8"
description:
- Purge existing rules_egree on security group that are not found in rules_egress
- Purge existing rules_egress on security group that are not found in rules_egress
required: false
default: 'true'
aliases: []
@ -70,8 +70,7 @@ notes:
EXAMPLES = '''
- name: example ec2 group
local_action:
module: ec2_group
ec2_group:
name: example
description: an example EC2 group
vpc_id: 12345
@ -102,6 +101,7 @@ EXAMPLES = '''
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
group_name: example-other
# description to use if example-other needs to be created
group_desc: other example EC2 group
@ -114,11 +114,21 @@ except ImportError:
sys.exit(1)
def make_rule_key(prefix, rule, group_id, cidr_ip):
"""Creates a unique key for an individual group rule"""
if isinstance(rule, dict):
proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')]
else: # isinstance boto.ec2.securitygroup.IPPermissions
proto, from_port, to_port = [getattr(rule, x, None) for x in ('ip_protocol', 'from_port', 'to_port')]
key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip)
return key.lower().replace('-none', '-None')
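The final replace is what keeps the 'None' placeholders readable after lowercasing. Assuming the make_rule_key definition above, a couple of illustrative values:

make_rule_key('in', dict(proto='tcp', from_port=80, to_port=80), 'sg-12345', None)
# -> 'in-tcp-80-80-sg-12345-None'   (.lower() made it '-none'; the replace restores it)
make_rule_key('in', dict(proto='tcp', from_port=80, to_port=80), None, '10.0.0.0/8')
# -> 'in-tcp-80-80-None-10.0.0.0/8'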
def addRulesToLookup(rules, prefix, dict):
for rule in rules:
for grant in rule.grants:
dict["%s-%s-%s-%s-%s-%s" % (prefix, rule.ip_protocol, rule.from_port, rule.to_port,
grant.group_id, grant.cidr_ip)] = rule
dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = rule
def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
@ -279,7 +289,7 @@ def main():
rule['to_port'] = None
# If rule already exists, don't later delete it
ruleId = "%s-%s-%s-%s-%s-%s" % ('in', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip)
ruleId = make_rule_key('in', rule, group_id, ip)
if ruleId in groupRules:
del groupRules[ruleId]
# Otherwise, add new rule
@ -320,7 +330,7 @@ def main():
rule['to_port'] = None
# If rule already exists, don't later delete it
ruleId = "%s-%s-%s-%s-%s-%s" % ('out', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip)
ruleId = make_rule_key('out', rule, group_id, ip)
if ruleId in groupRules:
del groupRules[ruleId]
# Otherwise, add new rule
@ -339,7 +349,7 @@ def main():
cidr_ip=ip)
changed = True
elif vpc_id and not module.check_mode:
# when using a vpc, but no egress rules are specified,
# we add in a default allow all out rule, which was the
# default behavior before egress rules were added
default_egress_rule = 'out--1-None-None-None-0.0.0.0/0'


@ -56,15 +56,13 @@ EXAMPLES = '''
# Creates a new ec2 key pair named `example` if not present, returns generated
# private key
- name: example ec2 key
local_action:
module: ec2_key
ec2_key:
name: example
# Creates a new ec2 key pair named `example` if not present using provided key
# material
# material. This could use the 'file' lookup plugin to pull this off disk.
- name: example2 ec2 key
local_action:
module: ec2_key
ec2_key:
name: example2
key_material: 'ssh-rsa AAAAxyz...== me@example.com'
state: present
@ -72,16 +70,14 @@ EXAMPLES = '''
# Creates a new ec2 key pair named `example` if not present using provided key
# material
- name: example3 ec2 key
local_action:
module: ec2_key
ec2_key:
name: example3
key_material: "{{ item }}"
with_file: /path/to/public_key.id_rsa.pub
# Removes ec2 key pair by name
- name: remove example key
local_action:
module: ec2_key
ec2_key:
name: example
state: absent
'''

cloud/ec2_lc.py → cloud/amazon/ec2_lc.py Executable file → Normal file

@ -93,7 +93,6 @@ options:
description:
- Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in an Amazon VPC.
required: false
default: false
aliases: []
version_added: "1.8"
ramdisk_id:
@ -125,7 +124,7 @@ EXAMPLES = '''
name: special
image_id: ami-XXX
key_name: default
security_groups: 'group,group2'
security_groups: ['group', 'group2' ]
instance_type: t1.micro
'''
@ -255,7 +254,7 @@ def main():
ebs_optimized=dict(default=False, type='bool'),
associate_public_ip_address=dict(type='bool'),
instance_monitoring=dict(default=False, type='bool'),
assign_public_ip=dict(default=False, type='bool')
assign_public_ip=dict(type='bool')
)
)
@ -265,7 +264,7 @@ def main():
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
state = module.params.get('state')


@ -271,7 +271,7 @@ def main():
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
if state == 'present':


@ -163,9 +163,7 @@ def main():
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
if not connection:
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound, e:
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg = str(e))
if state == 'present':


@ -48,6 +48,32 @@ options:
- a hash/dictionary of tags to add to the snapshot
required: false
version_added: "1.6"
wait:
description:
- wait for the snapshot to be ready
choices: ['yes', 'no']
required: false
default: yes
version_added: "1.5.1"
wait_timeout:
description:
- how long before wait gives up, in seconds
- specify 0 to wait forever
required: false
default: 0
version_added: "1.5.1"
state:
description:
- whether to create or delete a snapshot
required: false
default: present
choices: ['absent', 'present']
version_added: "1.9"
snapshot_id:
description:
- snapshot id to remove
required: false
version_added: "1.9"
author: Will Thames
extends_documentation_fragment: aws
@ -55,26 +81,29 @@ extends_documentation_fragment: aws
EXAMPLES = '''
# Simple snapshot of volume using volume_id
- local_action:
module: ec2_snapshot
- ec2_snapshot:
volume_id: vol-abcdef12
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume mounted on device_name attached to instance_id
- local_action:
module: ec2_snapshot
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume with tagging
- local_action:
module: ec2_snapshot
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
snapshot_tags:
frequency: hourly
source: /data
# Remove a snapshot
- local_action:
module: ec2_snapshot
snapshot_id: snap-abcd1234
state: absent
'''
import sys
@ -93,24 +122,28 @@ def main():
volume_id = dict(),
description = dict(),
instance_id = dict(),
snapshot_id = dict(),
device_name = dict(),
wait = dict(type='bool', default='true'),
wait_timeout = dict(default=0),
snapshot_tags = dict(type='dict', default=dict()),
state = dict(choices=['absent','present'], default='present'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
volume_id = module.params.get('volume_id')
snapshot_id = module.params.get('snapshot_id')
description = module.params.get('description')
instance_id = module.params.get('instance_id')
device_name = module.params.get('device_name')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
snapshot_tags = module.params.get('snapshot_tags')
state = module.params.get('state')
if not volume_id and not instance_id or volume_id and instance_id:
module.fail_json('One and only one of volume_id or instance_id must be specified')
if not volume_id and not instance_id and not snapshot_id or volume_id and instance_id and snapshot_id:
module.fail_json('One and only one of volume_id or instance_id or snapshot_id must be specified')
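# (illustrative note, not in the diff) in the test above, 'and' binds tighter
# than 'or', so it reads: (none of the three are set) or (all three are set)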
if instance_id and not device_name or device_name and not instance_id:
module.fail_json('Instance ID and device name must both be specified')
@ -125,6 +158,20 @@ def main():
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if state == 'absent':
if not snapshot_id:
module.fail_json(msg = 'snapshot_id must be set when state is absent')
try:
snapshots = ec2.get_all_snapshots([snapshot_id])
ec2.delete_snapshot(snapshot_id)
module.exit_json(changed=True)
except boto.exception.BotoServerError, e:
# exception is raised if snapshot does not exist
if e.error_code == 'InvalidSnapshot.NotFound':
module.exit_json(changed=False)
else:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
try:
snapshot = ec2.create_snapshot(volume_id, description=description)
time_waited = 0


@ -50,7 +50,7 @@ EXAMPLES = '''
# Basic example of adding tag(s)
tasks:
- name: tag a resource
local_action: ec2_tag resource=vol-XXXXXX region=eu-west-1 state=present
ec2_tag: resource=vol-XXXXXX region=eu-west-1 state=present
args:
tags:
Name: ubervol
@ -59,11 +59,11 @@ tasks:
# Playbook example of adding tag(s) to spawned instances
tasks:
- name: launch some instances
local_action: ec2 keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image_id }} wait=true region=eu-west-1
ec2: keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image_id }} wait=true region=eu-west-1
register: ec2
- name: tag my launched instances
local_action: ec2_tag resource={{ item.id }} region=eu-west-1 state=present
ec2_tag: resource={{ item.id }} region=eu-west-1 state=present
with_items: ec2.instances
args:
tags:
@ -71,11 +71,6 @@ tasks:
env: prod
'''
# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes.
# if state=present and it doesn't exist, create, tag and attach.
# Check for state by looking for volume attachment with tag (and against block device mapping?).
# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3).
import sys
import time


@ -48,6 +48,14 @@ options:
required: false
default: null
aliases: []
volume_type:
description:
- Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default
and continues to remain the Ansible default for backwards compatibility.
required: false
default: standard
aliases: []
version_added: "1.9"
iops:
description:
- the provisioned IOPs you want to associate with this volume (integer).
@ -105,36 +113,31 @@ extends_documentation_fragment: aws
EXAMPLES = '''
# Simple attachment action
- local_action:
module: ec2_vol
- ec2_vol:
instance: XXXXXX
volume_size: 5
device_name: sdd
# Example using custom iops params
- local_action:
module: ec2_vol
- ec2_vol:
instance: XXXXXX
volume_size: 5
iops: 200
device_name: sdd
# Example using snapshot id
- local_action:
module: ec2_vol
- ec2_vol:
instance: XXXXXX
snapshot: "{{ snapshot }}"
# Playbook example combined with instance launch
- local_action:
module: ec2
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
wait: yes
count: 3
register: ec2
- local_action:
module: ec2_vol
- ec2_vol:
instance: "{{ item.id }} "
volume_size: 5
with_items: ec2.instances
@ -144,8 +147,7 @@ EXAMPLES = '''
# * Nothing will happen if the volume is already attached.
# * Volume must exist in the same zone.
- local_action:
module: ec2
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
zone: YYYYYY
@ -154,8 +156,7 @@ EXAMPLES = '''
count: 1
register: ec2
- local_action:
module: ec2_vol
- ec2_vol:
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
@ -163,23 +164,28 @@ EXAMPLES = '''
register: ec2_vol
# Remove a volume
- local_action:
module: ec2_vol
- ec2_vol:
id: vol-XXXXXXXX
state: absent
# Detach a volume
- ec2_vol:
id: vol-XXXXXXXX
instance: None
# List volumes for an instance
- local_action:
module: ec2_vol
- ec2_vol:
instance: i-XXXXXX
state: list
# Create new volume using SSD storage
- ec2_vol:
instance: XXXXXX
volume_size: 50
volume_type: gp2
device_name: /dev/xvdf
'''
# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes.
# if state=present and it doesn't exist, create, tag and attach.
# Check for state by looking for volume attachment with tag (and against block device mapping?).
# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3).
import sys
import time
@ -253,22 +259,24 @@ def create_volume(module, ec2, zone):
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
snapshot = module.params.get('snapshot')
# If custom iops is defined we use volume_type "io1" rather than the default of "standard"
if iops:
volume_type = 'io1'
else:
volume_type = 'standard'
if instance == 'None' or instance == '':
instance = None
# If no instance supplied, try volume creation based on module parameters.
if name or id:
if not instance:
module.fail_json(msg = "If name or id is specified, instance must also be specified")
if iops or volume_size:
module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]")
volume = get_volume(module, ec2)
if volume.attachment_state() is not None:
if instance is None:
return volume
adata = volume.attach_data
if adata.instance_id != instance:
module.fail_json(msg = "Volume %s is already attached to another instance: %s"
@ -330,6 +338,13 @@ def attach_volume(module, ec2, volume, instance):
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
def detach_volume(module, ec2):
vol = get_volume(module, ec2)
if not vol or vol.attachment_state() is None:
module.exit_json(changed=False)
else:
vol.detach()
module.exit_json(changed=True)
def main():
argument_spec = ec2_argument_spec()
@ -338,6 +353,7 @@ def main():
id = dict(),
name = dict(),
volume_size = dict(),
volume_type = dict(choices=['standard', 'gp2', 'io1'], default='standard'),
iops = dict(),
encrypted = dict(),
device_name = dict(),
@ -352,6 +368,7 @@ def main():
name = module.params.get('name')
instance = module.params.get('instance')
volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
device_name = module.params.get('device_name')
@ -359,6 +376,9 @@ def main():
snapshot = module.params.get('snapshot')
state = module.params.get('state')
if instance == 'None' or instance == '':
instance = None
ec2 = ec2_connect(module)
if state == 'list':
@ -425,7 +445,9 @@ def main():
volume = create_volume(module, ec2, zone)
if instance:
attach_volume(module, ec2, volume, inst)
module.exit_json(volume_id=volume.id, device=device_name)
else:
detach_volume(module, ec2)
module.exit_json(volume_id=volume.id, device=device_name, volume_type=volume.type)
# import module snippets
from ansible.module_utils.basic import *


@ -130,16 +130,14 @@ EXAMPLES = '''
# It is assumed that their matching environment variables are set.
# Basic creation example:
local_action:
module: ec2_vpc
ec2_vpc:
state: present
cidr_block: 172.23.0.0/16
resource_tags: { "Environment":"Development" }
region: us-west-2
# Full creation example with subnets and optional availability zones.
# The absence or presence of subnets deletes or creates them respectively.
local_action:
module: ec2_vpc
ec2_vpc:
state: present
cidr_block: 172.22.0.0/16
resource_tags: { "Environment":"Development" }
@ -170,8 +168,7 @@ EXAMPLES = '''
register: vpc
# Removal of a VPC by id
local_action:
module: ec2_vpc
ec2_vpc:
state: absent
vpc_id: vpc-aaaaaaa
region: us-west-2


@ -111,8 +111,7 @@ EXAMPLES = """
# It is assumed that their matching environment variables are set.
# Basic example
- local_action:
module: elasticache
- elasticache:
name: "test-please-delete"
state: present
engine: memcached
@ -126,14 +125,12 @@ EXAMPLES = """
# Ensure cache cluster is gone
- local_action:
module: elasticache
- elasticache:
name: "test-please-delete"
state: absent
# Reboot cache cluster
- local_action:
module: elasticache
- elasticache:
name: "test-please-delete"
state: rebooted
@ -360,7 +357,9 @@ class ElastiCacheManager(object):
'modifying': 'available',
'deleting': 'gone'
}
if self.status == awaited_status:
# No need to wait, we're already done
return
if status_map[self.status] != awaited_status:
msg = "Invalid awaited status. '%s' cannot transition to '%s'"
self.module.fail_json(msg=msg % (self.status, awaited_status))

View file

@ -224,44 +224,45 @@ requirements: [ "boto" ]
author: Bruce Pennypacker
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
EXAMPLES = '''
# Basic mysql provisioning example
- rds: >
command=create
instance_name=new_database
db_engine=MySQL
size=10
instance_type=db.m1.small
username=mysql_admin
password=1nsecure
- rds:
command: create
instance_name: new_database
db_engine: MySQL
size: 10
instance_type: db.m1.small
username: mysql_admin
password: 1nsecure
# Create a read-only replica and wait for it to become available
- rds: >
command=replicate
instance_name=new_database_replica
source_instance=new_database
wait=yes
wait_timeout=600
- rds:
command: replicate
instance_name: new_database_replica
source_instance: new_database
wait: yes
wait_timeout: 600
# Delete an instance, but create a snapshot before doing so
- rds: >
command=delete
instance_name=new_database
snapshot=new_database_snapshot
- rds:
command: delete
instance_name: new_database
snapshot: new_database_snapshot
# Get facts about an instance
- rds: >
command=facts
instance_name=new_database
register: new_database_facts
- rds:
command: facts
instance_name: new_database
register: new_database_facts
# Rename an instance and wait for the change to take effect
- rds: >
command=modify
instance_name=new_database
new_instance_name=renamed_database
wait=yes
- rds:
command: modify
instance_name: new_database
new_instance_name: renamed_database
wait: yes
'''
import sys


@ -85,17 +85,18 @@ author: Scott Anderson
EXAMPLES = '''
# Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
- rds_param_group: >
state=present
name=norwegian_blue
description=My Fancy Ex Parrot Group
engine=mysql5.6
params='{"auto_increment_increment": "42K"}'
- rds_param_group:
state: present
name: norwegian_blue
description: 'My Fancy Ex Parrot Group'
engine: 'mysql5.6'
params:
auto_increment_increment: "42K"
# Remove a parameter group
- rds_param_group: >
state=absent
name=norwegian_blue
- rds_param_group:
state: absent
name: norwegian_blue
'''
import sys


@ -71,8 +71,7 @@ author: Scott Anderson
EXAMPLES = '''
# Add or change a subnet group
- local_action:
module: rds_subnet_group
- rds_subnet_group:
state: present
name: norwegian-blue
description: My Fancy Ex Parrot Subnet Group
@ -80,10 +79,10 @@ EXAMPLES = '''
- subnet-aaaaaaaa
- subnet-bbbbbbbb
# Remove a parameter group
- rds_param_group: >
state=absent
name=norwegian-blue
# Remove a subnet group
- rds_subnet_group:
state: absent
name: norwegian-blue
'''
import sys


@ -88,51 +88,54 @@ requirements: [ "boto" ]
author: Bruce Pennypacker
'''
# FIXME: the command stuff should have a more state like configuration alias -- MPD
EXAMPLES = '''
# Add new.foo.com as an A record with 3 IPs
- route53: >
command=create
zone=foo.com
record=new.foo.com
type=A
ttl=7200
value=1.1.1.1,2.2.2.2,3.3.3.3
- route53:
command: create
zone: foo.com
record: new.foo.com
type: A
ttl: 7200
value: 1.1.1.1,2.2.2.2,3.3.3.3
# Retrieve the details for new.foo.com
- route53: >
command=get
zone=foo.com
record=new.foo.com
type=A
- route53:
command: get
zone: foo.com
record: new.foo.com
type: A
register: rec
# Delete new.foo.com A record using the results from the get command
- route53: >
command=delete
zone=foo.com
record={{ rec.set.record }}
type={{ rec.set.type }}
value={{ rec.set.value }}
- route53:
command: delete
zone: foo.com
record: "{{ rec.set.record }}"
ttl: "{{ rec.set.ttl }}"
type: "{{ rec.set.type }}"
value: "{{ rec.set.value }}"
# Add an AAAA record. Note that because there are colons in the value,
# the entire parameter list must be quoted:
- route53: >
command=create
zone=foo.com
record=localhost.foo.com
type=AAAA
ttl=7200
value="::1"
- route53:
command: "create"
zone: "foo.com"
record: "localhost.foo.com"
type: "AAAA"
ttl: "7200"
value: "::1"
# Add a TXT record. Note that TXT and SPF records must be surrounded
# by quotes when sent to Route 53:
- route53: >
command=create
zone=foo.com
record=localhost.foo.com
type=TXT
ttl=7200
value="\"bar\""
- route53:
command: "create"
zone: "foo.com"
record: "localhost.foo.com"
type: "TXT"
ttl: "7200"
value: '"bar"'
'''
@ -160,7 +163,7 @@ def commit(changes, retry_interval):
code = code.split("</Code>")[0]
if code != 'PriorRequestNotComplete' or retry < 0:
raise e
time.sleep(retry_interval)
time.sleep(float(retry_interval))
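# (illustrative note, not in the diff) module parameters arrive as strings,
# and time.sleep('500') raises a TypeError, hence the float() cast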
def main():
argument_spec = ec2_argument_spec()
@ -178,9 +181,9 @@ def main():
module = AnsibleModule(argument_spec=argument_spec)
command_in = module.params.get('command')
zone_in = module.params.get('zone')
zone_in = module.params.get('zone').lower()
ttl_in = module.params.get('ttl')
record_in = module.params.get('record')
record_in = module.params.get('record').lower()
type_in = module.params.get('type')
value_in = module.params.get('value')
retry_interval_in = module.params.get('retry_interval')


@ -68,7 +68,7 @@ options:
aliases: []
s3_url:
description:
- "S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. Ansible tries to guess if fakes3 (https://github.com/jubos/fake-s3) or Eucalyptus Walrus (https://github.com/eucalyptus/eucalyptus/wiki/Walrus) is used and configure connection accordingly. Current heuristic is: everything with scheme fakes3:// is fakes3, everything else not ending with amazonaws.com is Walrus."
- "S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS"
default: null
aliases: [ S3_URL ]
aws_secret_key:
@ -103,28 +103,19 @@ author: Lester Wade, Ralph Tice
EXAMPLES = '''
# Simple PUT operation
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
# Simple GET operation
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get
# GET/download and overwrite local file (trust remote)
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get
# GET/download and do not overwrite local file (trust remote)
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get force=false
# PUT/upload and overwrite remote file (trust local)
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
# PUT/upload with metadata
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip'
# PUT/upload with multiple metadata
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
# PUT/upload and do not overwrite remote file (trust local)
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false
# Download an object as a string to use else where in your playbook
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=getstr
# Create an empty bucket
- s3: bucket=mybucket mode=create
# Create a bucket with key as directory
- s3: bucket=mybucket object=/my/directory/path mode=create
# Create an empty bucket in the EU region
- s3: bucket=mybucket mode=create region=eu-west-1
# Create a bucket with key as directory, in the EU region
- s3: bucket=mybucket object=/my/directory/path mode=create region=eu-west-1
# Delete a bucket and all contents
- s3: bucket=mybucket mode=delete
'''

cloud/azure/__init__.py Normal file



@ -236,7 +236,8 @@ class Droplet(JsonfyMixIn):
@classmethod
def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False):
json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking, backups_enabled)
private_networking_lower = str(private_networking).lower()
json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking_lower, backups_enabled)
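# (illustrative note, not in the diff) str(True).lower() == 'true'; the API
# call apparently wants the lowercase string form rather than Python's 'True'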
droplet = cls(json)
return droplet


@ -27,7 +27,7 @@ options:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'active', 'absent', 'deleted']
choices: ['present', 'absent']
client_id:
description:
- DigitalOcean manager id.
@ -145,7 +145,7 @@ class Domain(JsonfyMixIn):
return False
domains = Domain.list_all()
if id is not None:
for domain in domains:
if domain.id == id:
@ -181,7 +181,7 @@ def core(module):
if not domain:
domain = Domain.find(name=getkeyordie("name"))
if not domain:
domain = Domain.add(getkeyordie("name"),
getkeyordie("ip"))
@ -203,10 +203,10 @@ def core(module):
domain = None
if "id" in module.params:
domain = Domain.find(id=module.params["id"])
if not domain and "name" in module.params:
domain = Domain.find(name=module.params["name"])
if not domain:
module.exit_json(changed=False, msg="Domain not found.")
@ -217,7 +217,7 @@ def core(module):
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'),
state = dict(choices=['present', 'absent'], default='present'),
client_id = dict(aliases=['CLIENT_ID'], no_log=True),
api_key = dict(aliases=['API_KEY'], no_log=True),
name = dict(type='str'),

cloud/docker/__init__.py Normal file


@ -23,6 +23,7 @@
DOCUMENTATION = '''
---
module: docker_image
deprecated: "functions are being rolled into the 'docker' module"
author: Pavel Antonov
version_added: "1.5"
short_description: manage docker images


@ -77,7 +77,7 @@ options:
version_added: "1.5"
volumes:
description:
- Set volume(s) to mount on the container
- Set volume(s) to mount on the container, comma-separated, each in the format "source:dest[:rights]"
required: false
default: null
aliases: []
@ -96,11 +96,11 @@ options:
version_added: "1.5"
memory_limit:
description:
- Set RAM allocated to container
- Set RAM allocated to container. It will be passed as a number of bytes. For example 1048576 = 1MB
required: false
default: null
aliases: []
default: 256MB
default: 262144
docker_url:
description:
- URL of docker host to issue commands to
@ -126,6 +126,12 @@ options:
required: false
default: null
aliases: []
email:
description:
- Set remote API email
required: false
default: null
aliases: []
hostname:
description:
- Set container hostname
@ -204,6 +210,27 @@ options:
default: ''
aliases: []
version_added: "1.8"
restart_policy:
description:
- Set the container restart policy
required: false
default: false
aliases: []
version_added: "1.9"
restart_policy_retry:
description:
- Set the retry limit for container restart policy
required: false
default: false
aliases: []
version_added: "1.9"
insecure_registry:
description:
- Use insecure private registry by HTTP instead of HTTPS (needed for docker-py >= 0.5.0).
required: false
default: false
aliases: []
version_added: "1.9"
author: Cove Schneider, Joshua Conner, Pavel Antonov
requirements: [ "docker-py >= 0.3.0", "docker >= 0.10.0" ]
@ -336,10 +363,11 @@ try:
except ImportError, e:
HAS_DOCKER_PY = False
try:
from docker.errors import APIError as DockerAPIError
except ImportError:
from docker.client import APIError as DockerAPIError
if HAS_DOCKER_PY:
try:
from docker.errors import APIError as DockerAPIError
except ImportError:
from docker.client import APIError as DockerAPIError
def _human_to_bytes(number):
@ -369,9 +397,81 @@ def _docker_id_quirk(inspect):
del inspect['ID']
return inspect
class DockerManager:
def get_split_image_tag(image):
# If image contains a host or org name, omit that from our check
if '/' in image:
registry, resource = image.rsplit('/', 1)
else:
registry, resource = None, image
# now we can determine if image has a tag
if ':' in resource:
resource, tag = resource.split(':', 1)
if registry:
resource = '/'.join((registry, resource))
else:
tag = "latest"
resource = image
return resource, tag
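Hoisting this helper to module level lets both the creation path and the container-matching loop further down share it. Given the definition above, the split behaves like this (inputs are illustrative):

get_split_image_tag('ubuntu')            # -> ('ubuntu', 'latest')
get_split_image_tag('ubuntu:14.04')      # -> ('ubuntu', '14.04')
get_split_image_tag('registry.example.com/org/app:v1')
                                         # -> ('registry.example.com/org/app', 'v1')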
def get_docker_py_versioninfo():
if hasattr(docker, '__version__'):
# a '__version__' attribute was added to the module but not until
# after 0.3.0 was pushed to pypi. If it's there, use it.
version = []
for part in docker.__version__.split('.'):
try:
version.append(int(part))
except ValueError:
for idx, char in enumerate(part):
if not char.isdigit():
nondigit = part[idx:]
digit = part[:idx]
if digit:
version.append(int(digit))
if nondigit:
version.append(nondigit)
elif hasattr(docker.Client, '_get_raw_response_socket'):
# HACK: if '__version__' isn't there, we check for the existence of
# `_get_raw_response_socket` in the docker.Client class, which was
# added in 0.3.0
version = (0, 3, 0)
else:
# This is untrue but this module does not function with a version less
# than 0.3.0 so it's okay to lie here.
version = (0,)
return tuple(version)
def check_dependencies(module):
"""
Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a
helpful error message if it isn't.
"""
if not HAS_DOCKER_PY:
module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.")
else:
versioninfo = get_docker_py_versioninfo()
if versioninfo < (0, 3, 0):
module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.")
class DockerManager(object):
counters = {'created':0, 'started':0, 'stopped':0, 'killed':0, 'removed':0, 'restarted':0, 'pull':0}
_capabilities = set()
# Map optional parameters to minimum (docker-py version, server APIVersion)
# docker-py version is a tuple of ints because we have to compare them
# server APIVersion is passed to a docker-py function that takes strings
_cap_ver_req = {
'dns': ((0, 3, 0), '1.10'),
'volumes_from': ((0, 3, 0), '1.10'),
'restart_policy': ((0, 5, 0), '1.14'),
# Clientside only
'insecure_registry': ((0, 5, 0), '0.0')
}
def __init__(self, module):
self.module = module
@ -424,8 +524,50 @@ class DockerManager:
# connect to docker server
docker_url = urlparse(module.params.get('docker_url'))
docker_api_version = module.params.get('docker_api_version')
if not docker_api_version:
docker_api_version = docker.client.DEFAULT_DOCKER_API_VERSION
self.client = docker.Client(base_url=docker_url.geturl(), version=docker_api_version)
self.docker_py_versioninfo = get_docker_py_versioninfo()
def _check_capabilities(self):
"""
Create a list of available capabilities
"""
api_version = self.client.version()['ApiVersion']
for cap, req_vers in self._cap_ver_req.items():
if (self.docker_py_versioninfo >= req_vers[0] and
docker.utils.compare_version(req_vers[1], api_version) >= 0):
self._capabilities.add(cap)
def ensure_capability(self, capability, fail=True):
"""
Some of the functionality this Ansible module implements is only
available in newer versions of docker. Ensure that the capability
is available here.
If fail is set to False then return True or False depending on whether
we have the capability. Otherwise, simply fail and exit the module if
we lack the capability.
"""
if not self._capabilities:
self._check_capabilities()
if capability in self._capabilities:
return True
if not fail:
return False
api_version = self.client.version()['ApiVersion']
self.module.fail_json(msg='Specifying the `%s` parameter requires'
' docker-py: %s, docker server apiversion %s; found'
' docker-py: %s, server: %s' % (
capability,
'.'.join(map(str, self._cap_ver_req[capability][0])),
self._cap_ver_req[capability][1],
'.'.join(map(str, self.docker_py_versioninfo)),
api_version))
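Roughly, the manager methods below use this in two modes (a sketch of the calling pattern, not new module code):

    self.ensure_capability('dns')  # hard requirement: fail_json unless supported
    if self.ensure_capability('insecure_registry', fail=False):  # soft probe
        extra_params['insecure_registry'] = True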
def get_links(self, links):
"""
@ -505,24 +647,6 @@ class DockerManager:
return binds
def get_split_image_tag(self, image):
# If image contains a host or org name, omit that from our check
if '/' in image:
registry, resource = image.rsplit('/', 1)
else:
registry, resource = None, image
# now we can determine if image has a tag
if ':' in resource:
resource, tag = resource.split(':', 1)
if registry:
resource = '/'.join((registry, resource))
else:
tag = "latest"
resource = image
return resource, tag
def get_summary_counters_msg(self):
msg = ""
for k, v in self.counters.iteritems():
@ -562,10 +686,10 @@ class DockerManager:
# if we weren't given a tag with the image, we need to only compare on the image name, as
# docker will give us back the full image name, including a tag, in the container list if one exists.
image, tag = self.get_split_image_tag(image)
image, tag = get_split_image_tag(image)
for i in self.client.containers(all=True):
running_image, running_tag = self.get_split_image_tag(i['Image'])
running_image, running_tag = get_split_image_tag(i['Image'])
running_command = i['Command'].strip()
name_matches = False
@ -604,11 +728,20 @@ class DockerManager:
'name': self.module.params.get('name'),
'stdin_open': self.module.params.get('stdin_open'),
'tty': self.module.params.get('tty'),
'dns': self.module.params.get('dns'),
'volumes_from': self.module.params.get('volumes_from'),
}
if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) < 0:
params['dns'] = self.module.params.get('dns')
params['volumes_from'] = self.module.params.get('volumes_from')
if params['dns'] is not None:
self.ensure_capability('dns')
if params['volumes_from'] is not None:
self.ensure_capability('volumes_from')
extra_params = {}
if self.module.params.get('insecure_registry'):
if self.ensure_capability('insecure_registry', fail=False):
extra_params['insecure_registry'] = self.module.params.get('insecure_registry')
def do_create(count, params):
results = []
@ -623,7 +756,7 @@ class DockerManager:
containers = do_create(count, params)
except:
resource = self.module.params.get('image')
image, tag = self.get_split_image_tag(resource)
image, tag = get_split_image_tag(resource)
if self.module.params.get('username'):
try:
self.client.login(
@ -635,7 +768,7 @@ class DockerManager:
except:
self.module.fail_json(msg="failed to login to the remote registry, check your username/password.")
try:
self.client.pull(image, tag=tag)
self.client.pull(image, tag=tag, **extra_params)
except:
self.module.fail_json(msg="failed to pull the specified image: %s" % resource)
self.increment_counter('pull')
@ -653,9 +786,24 @@ class DockerManager:
'links': self.links,
'network_mode': self.module.params.get('net'),
}
if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) >= 0 and hasattr(docker, '__version__') and docker.__version__ > '0.3.0':
params['dns'] = self.module.params.get('dns')
params['volumes_from'] = self.module.params.get('volumes_from')
optionals = {}
for optional_param in ('dns', 'volumes_from', 'restart_policy', 'restart_policy_retry'):
optionals[optional_param] = self.module.params.get(optional_param)
if optionals['dns'] is not None:
self.ensure_capability('dns')
params['dns'] = optionals['dns']
if optionals['volumes_from'] is not None:
self.ensure_capability('volumes_from')
params['volumes_from'] = optionals['volumes_from']
if optionals['restart_policy'] is not None:
self.ensure_capability('restart_policy')
params['restart_policy'] = { 'Name': optionals['restart_policy'] }
if params['restart_policy']['Name'] == 'on-failure':
params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry']
for i in containers:
self.client.start(i['Id'], **params)
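For example, with restart_policy=on-failure and restart_policy_retry=3 (hypothetical values), the payload passed to start() would include:

    # params['restart_policy'] -> {'Name': 'on-failure', 'MaximumRetryCount': 3}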
@ -684,31 +832,6 @@ class DockerManager:
self.increment_counter('restarted')
def check_dependencies(module):
"""
Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a
helpful error message if it isn't.
"""
if not HAS_DOCKER_PY:
module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.")
else:
HAS_NEW_ENOUGH_DOCKER_PY = False
if hasattr(docker, '__version__'):
# a '__version__' attribute was added to the module but not until
# after 0.3.0 was pushed to pip. If it's there, use it.
if docker.__version__ >= '0.3.0':
HAS_NEW_ENOUGH_DOCKER_PY = True
else:
# HACK: if '__version__' isn't there, we check for the existence of
# `_get_raw_response_socket` in the docker.Client class, which was
# added in 0.3.0
if hasattr(docker.Client, '_get_raw_response_socket'):
HAS_NEW_ENOUGH_DOCKER_PY = True
if not HAS_NEW_ENOUGH_DOCKER_PY:
module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.")
def main():
module = AnsibleModule(
argument_spec = dict(
@ -724,7 +847,7 @@ def main():
memory_limit = dict(default=0),
memory_swap = dict(default=0),
docker_url = dict(default='unix://var/run/docker.sock'),
docker_api_version = dict(default=docker.client.DEFAULT_DOCKER_API_VERSION),
docker_api_version = dict(),
username = dict(default=None),
password = dict(),
email = dict(),
@ -734,13 +857,16 @@ def main():
dns = dict(),
detach = dict(default=True, type='bool'),
state = dict(default='running', choices=['absent', 'present', 'running', 'stopped', 'killed', 'restarted']),
restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']),
restart_policy_retry = dict(default=0, type='int'),
debug = dict(default=False, type='bool'),
privileged = dict(default=False, type='bool'),
stdin_open = dict(default=False, type='bool'),
tty = dict(default=False, type='bool'),
lxc_conf = dict(default=None, type='list'),
name = dict(default=None),
net = dict(default=None)
net = dict(default=None),
insecure_registry = dict(default=False, type='bool'),
)
)
@ -851,4 +977,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()

cloud/google/__init__.py Normal file
View file

View file

@ -319,11 +319,12 @@ def handle_create(module, gs, bucket, obj):
else:
module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket))
if bucket and obj:
if obj.endswith('/'):
dirobj = obj
else:
dirobj = obj + "/"
if bucket_check(module, gs, bucket):
if obj.endswith('/'):
dirobj = obj
else:
dirobj = obj + "/"
if key_check(module, gs, bucket, dirobj):
module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
else:

cloud/gce.py → cloud/google/gce.py Executable file → Normal file
View file

View file

@ -35,7 +35,7 @@ options:
description:
- the protocol:ports to allow ('tcp:80' or 'tcp:80,443' or 'tcp:80-800')
required: false
default: null
default: null
aliases: []
ipv4_range:
description:
@ -101,15 +101,16 @@ author: Eric Johnson <erjohnso@google.com>
EXAMPLES = '''
# Simple example of creating a new network
- local_action:
- local_action:
module: gce_net
name: privatenet
ipv4_range: '10.240.16.0/24'
# Simple example of creating a new firewall rule
- local_action:
- local_action:
module: gce_net
name: privatenet
fwname: all-web-webproxy
allowed: tcp:80,8080
src_tags: ["web", "proxy"]
@ -155,7 +156,7 @@ def main():
ipv4_range = dict(),
fwname = dict(),
name = dict(),
src_range = dict(),
src_range = dict(type='list'),
src_tags = dict(type='list'),
state = dict(default='present'),
service_account_email = dict(),

cloud/linode/__init__.py Normal file
View file

View file

View file

@ -254,7 +254,7 @@ def main():
else:
_glance_delete_image(module, module.params, client)
# this is magic, see lib/ansible/module.params['common.py
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

View file

@ -291,6 +291,9 @@ def main():
argument_spec.update(dict(
tenant_description=dict(required=False),
email=dict(required=False),
user=dict(required=False),
tenant=dict(required=False),
password=dict(required=False),
role=dict(required=False),
state=dict(default='present', choices=['present', 'absent']),
endpoint=dict(required=False,

View file

@ -121,10 +121,10 @@ options:
description:
- Should a floating ip be auto created and assigned
required: false
default: 'yes'
default: 'no'
version_added: "1.8"
floating_ips:
decription:
description:
- list of valid floating IPs that pre-exist to assign to this node
required: false
default: None
@ -405,7 +405,7 @@ def _get_flavor_id(module, nova):
if (flavor.ram >= module.params['flavor_ram'] and
(not module.params['flavor_include'] or module.params['flavor_include'] in flavor.name)):
return flavor.id
module.fail_json(msg = "Error finding flavor with %sMB of RAM" % module.params['flavor_ram'])
module.fail_json(msg = "Error finding flavor with %sMB of RAM" % module.params['flavor_ram'])
return module.params['flavor_id']

View file

View file

@ -64,7 +64,10 @@ options:
exact_count:
description:
- Explicitly ensure an exact count of instances, used with
state=active/present
state=active/present. If specified as C(yes) and I(count) is less than
the servers matched, servers will be deleted to match the count. If
the number of matched servers is fewer than specified in I(count),
additional servers will be added.
default: no
choices:
- "yes"
@ -150,6 +153,12 @@ options:
- how long before wait gives up, in seconds
default: 300
author: Jesse Keating, Matt Martz
notes:
- I(exact_count) can be "destructive" if the number of running servers in
the I(group) is larger than that specified in I(count). In such a case, the
I(state) is effectively set to C(absent) and the extra servers are deleted.
In the case of deletion, the returned data structure will have C(action)
set to C(delete), and the oldest servers in the group will be deleted.
extends_documentation_fragment: rackspace.openstack
'''
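The exact_count reconciliation described in the notes above reduces to a simple comparison; a minimal standalone sketch (not the module code):

    def reconcile(matched, wanted):
        # decide what an exact_count run must do to converge on the wanted count
        if matched > wanted:
            return 'delete', matched - wanted  # oldest servers are removed first
        if matched < wanted:
            return 'create', wanted - matched
        return None, 0

    assert reconcile(5, 3) == ('delete', 2)
    assert reconcile(1, 3) == ('create', 2)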
@ -441,79 +450,102 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
if group is None:
module.fail_json(msg='"group" must be provided when using '
'"exact_count"')
else:
if auto_increment:
numbers = set()
# See if the name is a printf-like string; if not, append
# %d to the end
try:
name % 0
except TypeError, e:
if e.message.startswith('not all'):
name = '%s%%d' % name
else:
module.fail_json(msg=e.message)
# regex pattern to match printf formatting
pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
for server in cs.servers.list():
# Ignore DELETED servers
if server.status == 'DELETED':
continue
if server.metadata.get('group') == group:
servers.append(server)
match = re.search(pattern, server.name)
if match:
number = int(match.group(1))
numbers.add(number)
number_range = xrange(count_offset, count_offset + count)
available_numbers = list(set(number_range)
.difference(numbers))
else: # Not auto incrementing
for server in cs.servers.list():
# Ignore DELETED servers
if server.status == 'DELETED':
continue
if server.metadata.get('group') == group:
servers.append(server)
# available_numbers not needed here, we inspect auto_increment
# again later
# If state was absent but the count was changed,
# assume we only wanted to remove that number of instances
if was_absent:
diff = len(servers) - count
if diff < 0:
count = 0
else:
count = diff
if len(servers) > count:
# We have more servers than we need, set state='absent'
# and delete the extras, this should delete the oldest
state = 'absent'
kept = servers[:count]
del servers[:count]
instance_ids = []
for server in servers:
instance_ids.append(server.id)
delete(module, instance_ids=instance_ids, wait=wait,
wait_timeout=wait_timeout, kept=kept)
elif len(servers) < count:
# we have fewer servers than we need
if auto_increment:
numbers = set()
try:
name % 0
except TypeError, e:
if e.message.startswith('not all'):
name = '%s%%d' % name
else:
module.fail_json(msg=e.message)
pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
for server in cs.servers.list():
if server.metadata.get('group') == group:
servers.append(server)
match = re.search(pattern, server.name)
if match:
number = int(match.group(1))
numbers.add(number)
number_range = xrange(count_offset, count_offset + count)
available_numbers = list(set(number_range)
.difference(numbers))
# auto incrementing server numbers
names = []
name_slice = count - len(servers)
numbers_to_use = available_numbers[:name_slice]
for number in numbers_to_use:
names.append(name % number)
else:
for server in cs.servers.list():
if server.metadata.get('group') == group:
servers.append(server)
# If state was absent but the count was changed,
# assume we only wanted to remove that number of instances
if was_absent:
diff = len(servers) - count
if diff < 0:
count = 0
else:
count = diff
if len(servers) > count:
state = 'absent'
kept = servers[:count]
del servers[:count]
instance_ids = []
for server in servers:
instance_ids.append(server.id)
delete(module, instance_ids=instance_ids, wait=wait,
wait_timeout=wait_timeout, kept=kept)
elif len(servers) < count:
if auto_increment:
names = []
name_slice = count - len(servers)
numbers_to_use = available_numbers[:name_slice]
for number in numbers_to_use:
names.append(name % number)
else:
names = [name] * (count - len(servers))
else:
instances = []
instance_ids = []
for server in servers:
instances.append(rax_to_dict(server, 'server'))
instance_ids.append(server.id)
module.exit_json(changed=False, action=None,
instances=instances,
success=[], error=[], timeout=[],
instance_ids={'instances': instance_ids,
'success': [], 'error': [],
'timeout': []})
else:
# We are not auto incrementing server numbers;
# create a list of names matching how many more we need
names = [name] * (count - len(servers))
else:
# we have the right number of servers, just return info
# about all of the matched servers
instances = []
instance_ids = []
for server in servers:
instances.append(rax_to_dict(server, 'server'))
instance_ids.append(server.id)
module.exit_json(changed=False, action=None,
instances=instances,
success=[], error=[], timeout=[],
instance_ids={'instances': instance_ids,
'success': [], 'error': [],
'timeout': []})
else: # not called with exact_count=True
if group is not None:
if auto_increment:
# we are auto incrementing server numbers, but not with
# exact_count
numbers = set()
# See if the name is a printf-like string; if not, append
# %d to the end
try:
name % 0
except TypeError, e:
@ -522,8 +554,12 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
else:
module.fail_json(msg=e.message)
# regex pattern to match printf formatting
pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
for server in cs.servers.list():
# Ignore DELETED servers
if server.status == 'DELETED':
continue
if server.metadata.get('group') == group:
servers.append(server)
match = re.search(pattern, server.name)
@ -540,8 +576,11 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
for number in numbers_to_use:
names.append(name % number)
else:
# Not auto incrementing
names = [name] * count
else:
# No group was specified, and not using exact_count
# Perform more simplistic matching
search_opts = {
'name': '^%s$' % name,
'image': image,
@ -549,11 +588,18 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
}
servers = []
for server in cs.servers.list(search_opts=search_opts):
# Ignore DELETED servers
if server.status == 'DELETED':
continue
# Ignore servers with non matching metadata
if server.metadata != meta:
continue
servers.append(server)
if len(servers) >= count:
# We have more servers than were requested, don't do
# anything. Not running with exact_count=True, so we assume
# more is OK
instances = []
for server in servers:
instances.append(rax_to_dict(server, 'server'))
@ -566,6 +612,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
'success': [], 'error': [],
'timeout': []})
# We need more servers to reach our target; create names for
# them. We aren't performing auto_increment here
names = [name] * (count - len(servers))
create(module, names=names, flavor=flavor, image=image,
@ -577,6 +625,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
elif state == 'absent':
if instance_ids is None:
# We weren't given an explicit list of server IDs to delete
# Let's match instead
for arg, value in dict(name=name, flavor=flavor,
image=image).iteritems():
if not value:
@ -588,10 +638,15 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
'flavor': flavor
}
for server in cs.servers.list(search_opts=search_opts):
# Ignore DELETED servers
if server.status == 'DELETED':
continue
# Ignore servers with non matching metadata
if meta != server.metadata:
continue
servers.append(server)
# Build a list of server IDs to delete
instance_ids = []
for server in servers:
if len(instance_ids) < count:
@ -600,6 +655,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
break
if not instance_ids:
# No server IDs were matched for deletion, or no IDs were
# explicitly provided, just exit and don't do anything
module.exit_json(changed=False, action=None, instances=[],
success=[], error=[], timeout=[],
instance_ids={'instances': [],

View file

@ -108,10 +108,6 @@ except ImportError:
def cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout):
for arg in (state, name, size, volume_type):
if not arg:
module.fail_json(msg='%s is required for rax_cbs' % arg)
if size < 100:
module.fail_json(msg='"size" must be greater than or equal to 100')
@ -145,10 +141,7 @@ def cloud_block_storage(module, state, name, description, meta, size,
attempts=attempts)
volume.get()
for key, value in vars(volume).iteritems():
if (isinstance(value, NON_CALLABLES) and
not key.startswith('_')):
instance[key] = value
instance = rax_to_dict(volume)
result = dict(changed=changed, volume=instance)
@ -164,6 +157,7 @@ def cloud_block_storage(module, state, name, description, meta, size,
elif state == 'absent':
if volume:
instance = rax_to_dict(volume)
try:
volume.delete()
changed = True

View file

@ -90,11 +90,6 @@ except ImportError:
def cloud_block_storage_attachments(module, state, volume, server, device,
wait, wait_timeout):
for arg in (state, volume, server, device):
if not arg:
module.fail_json(msg='%s is required for rax_cbs_attachments' %
arg)
cbs = pyrax.cloud_blockstorage
cs = pyrax.cloudservers
@ -133,7 +128,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device,
not key.startswith('_')):
instance[key] = value
result = dict(changed=changed, volume=instance)
result = dict(changed=changed)
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id
@ -142,6 +137,9 @@ def cloud_block_storage_attachments(module, state, volume, server, device,
pyrax.utils.wait_until(volume, 'status', 'in-use',
interval=5, attempts=attempts)
volume.get()
result['volume'] = rax_to_dict(volume)
if 'msg' in result:
module.fail_json(**result)
else:
@ -167,12 +165,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device,
elif volume.attachments:
module.fail_json(msg='Volume is attached to another server')
for key, value in vars(volume).iteritems():
if (isinstance(value, NON_CALLABLES) and
not key.startswith('_')):
instance[key] = value
result = dict(changed=changed, volume=instance)
result = dict(changed=changed, volume=rax_to_dict(volume))
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id

View file

@ -140,10 +140,6 @@ except ImportError:
def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
vip_type, timeout, wait, wait_timeout, vip_id):
for arg in (state, name, port, protocol, vip_type):
if not arg:
module.fail_json(msg='%s is required for rax_clb' % arg)
if int(timeout) < 30:
module.fail_json(msg='"timeout" must be greater than or equal to 30')
@ -257,7 +253,7 @@ def main():
algorithm=dict(choices=CLB_ALGORITHMS,
default='LEAST_CONNECTIONS'),
meta=dict(type='dict', default={}),
name=dict(),
name=dict(required=True),
port=dict(type='int', default=80),
protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'),
state=dict(default='present', choices=['present', 'absent']),

View file

@ -150,21 +150,6 @@ def _get_node(lb, node_id=None, address=None, port=None):
return None
def _is_primary(node):
"""Return True if node is primary and enabled"""
return (node.type.lower() == 'primary' and
node.condition.lower() == 'enabled')
def _get_primary_nodes(lb):
"""Return a list of primary and enabled nodes"""
nodes = []
for node in lb.nodes:
if _is_primary(node):
nodes.append(node)
return nodes
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
@ -230,13 +215,6 @@ def main():
if state == 'absent':
if not node: # Removing a non-existent node
module.exit_json(changed=False, state=state)
# The API detects this as well but currently pyrax does not return a
# meaningful error message
if _is_primary(node) and len(_get_primary_nodes(lb)) == 1:
module.fail_json(
msg='At least one primary node has to be enabled')
try:
lb.delete_node(node)
result = {}
@ -299,5 +277,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
# invoke the module
main()

View file

@ -55,10 +55,6 @@ except ImportError:
def cloud_identity(module, state, identity):
for arg in (state, identity):
if not arg:
module.fail_json(msg='%s is required for rax_identity' % arg)
instance = dict(
authenticated=identity.authenticated,
credentials=identity._creds_file
@ -79,7 +75,7 @@ def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent'])
state=dict(default='present', choices=['present'])
)
)
@ -95,7 +91,7 @@ def main():
setup_rax_module(module, pyrax)
if pyrax.identity is None:
if not pyrax.identity:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
@ -106,5 +102,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
# invoke the module
main()

View file

@ -104,7 +104,7 @@ def rax_keypair(module, name, public_key, state):
keypair = {}
if state == 'present':
if os.path.isfile(public_key):
if public_key and os.path.isfile(public_key):
try:
f = open(public_key)
public_key = f.read()
@ -143,7 +143,7 @@ def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
name=dict(),
name=dict(required=True),
public_key=dict(),
state=dict(default='present', choices=['absent', 'present']),
)

View file

@ -65,10 +65,6 @@ except ImportError:
def cloud_network(module, state, label, cidr):
for arg in (state, label, cidr):
if not arg:
module.fail_json(msg='%s is required for cloud_networks' % arg)
changed = False
network = None
networks = []
@ -79,6 +75,9 @@ def cloud_network(module, state, label, cidr):
'incorrectly capitalized region name.')
if state == 'present':
if not cidr:
module.fail_json(msg='missing required arguments: cidr')
try:
network = pyrax.cloud_networks.find_network_by_label(label)
except pyrax.exceptions.NetworkNotFound:
@ -115,7 +114,7 @@ def main():
dict(
state=dict(default='present',
choices=['present', 'absent']),
label=dict(),
label=dict(required=True),
cidr=dict()
)
)

View file

@ -24,6 +24,14 @@ description:
- Manipulate Rackspace Cloud Autoscale Groups
version_added: 1.7
options:
config_drive:
description:
- Attach read-only configuration drive to server as label config-2
default: no
choices:
- "yes"
- "no"
version_added: 1.8
cooldown:
description:
- The period of time, in seconds, that must pass before any scaling can
@ -92,6 +100,11 @@ options:
- present
- absent
default: present
user_data:
description:
- Data to be uploaded to the server's config drive. This option implies
I(config_drive). Can be a file path or a string
version_added: 1.8
author: Matt Martz
extends_documentation_fragment: rackspace
'''
@ -118,6 +131,8 @@ EXAMPLES = '''
register: asg
'''
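As documented above, I(user_data) implies I(config_drive) and may be either a file path or a literal string; a minimal sketch of that normalization (the path is hypothetical):

    import os

    user_data = '/tmp/cloud-config.yml'  # hypothetical; may also be a literal string
    config_drive = bool(user_data)       # user_data implies config_drive
    if user_data and os.path.isfile(user_data):
        with open(user_data) as f:
            user_data = f.read()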
import base64
try:
import pyrax
HAS_PYRAX = True
@ -128,17 +143,27 @@ except ImportError:
def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
image=None, key_name=None, loadbalancers=[], meta={},
min_entities=0, max_entities=0, name=None, networks=[],
server_name=None, state='present'):
server_name=None, state='present', user_data=None,
config_drive=False):
changed = False
au = pyrax.autoscale
cnw = pyrax.cloud_networks
cs = pyrax.cloudservers
if not au or not cnw or not cs:
if not au:
module.fail_json(msg='Failed to instantiate clients. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if user_data:
config_drive = True
if user_data and os.path.isfile(user_data):
try:
f = open(user_data)
user_data = f.read()
f.close()
except Exception, e:
module.fail_json(msg='Failed to load %s' % user_data)
if state == 'present':
# Normalize and ensure all metadata values are strings
if meta:
@ -184,8 +209,16 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
lbs = []
if loadbalancers:
for lb in loadbalancers:
lb_id = lb.get('id')
port = lb.get('port')
try:
lb_id = int(lb.get('id'))
except (ValueError, TypeError):
module.fail_json(msg='Load balancer ID is not an integer: '
'%s' % lb.get('id'))
try:
port = int(lb.get('port'))
except (ValueError, TypeError):
module.fail_json(msg='Load balancer port is not an '
'integer: %s' % lb.get('port'))
if not lb_id or not port:
continue
lbs.append((lb_id, port))
@ -202,9 +235,10 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
launch_config_type='launch_server',
server_name=server_name, image=image,
flavor=flavor, disk_config=disk_config,
metadata=meta, personality=files,
metadata=meta, personality=personality,
networks=nics, load_balancers=lbs,
key_name=key_name)
key_name=key_name, config_drive=config_drive,
user_data=user_data)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
@ -237,14 +271,23 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
if flavor != lc.get('flavor'):
lc_args['flavor'] = flavor
if disk_config != lc.get('disk_config'):
disk_config = disk_config or 'AUTO'
if ((disk_config or lc.get('disk_config')) and
disk_config != lc.get('disk_config')):
lc_args['disk_config'] = disk_config
if meta != lc.get('metadata'):
if (meta or lc.get('metadata')) and meta != lc.get('metadata'):
lc_args['metadata'] = meta
if files != lc.get('personality'):
lc_args['personality'] = files
test_personality = []
for p in personality:
test_personality.append({
'path': p['path'],
'contents': base64.b64encode(p['contents'])
})
if ((test_personality or lc.get('personality')) and
test_personality != lc.get('personality')):
lc_args['personality'] = personality
if nics != lc.get('networks'):
lc_args['networks'] = nics
@ -256,6 +299,13 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
if key_name != lc.get('key_name'):
lc_args['key_name'] = key_name
if config_drive != lc.get('config_drive'):
lc_args['config_drive'] = config_drive
if (user_data and
base64.b64encode(user_data) != lc.get('user_data')):
lc_args['user_data'] = user_data
if lc_args:
# Workaround for https://github.com/rackspace/pyrax/pull/389
if 'flavor' not in lc_args:
@ -284,9 +334,10 @@ def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
config_drive=dict(default=False, type='bool'),
cooldown=dict(type='int', default=300),
disk_config=dict(choices=['auto', 'manual']),
files=dict(type='list', default=[]),
files=dict(type='dict', default={}),
flavor=dict(required=True),
image=dict(required=True),
key_name=dict(),
@ -298,6 +349,7 @@ def main():
networks=dict(type='list', default=['public', 'private']),
server_name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
user_data=dict(no_log=True),
)
)
@ -309,6 +361,7 @@ def main():
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
config_drive = module.params.get('config_drive')
cooldown = module.params.get('cooldown')
disk_config = module.params.get('disk_config')
if disk_config:
@ -325,6 +378,7 @@ def main():
networks = module.params.get('networks')
server_name = module.params.get('server_name')
state = module.params.get('state')
user_data = module.params.get('user_data')
if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000:
module.fail_json(msg='min_entities and max_entities must be an '
@ -340,7 +394,7 @@ def main():
key_name=key_name, loadbalancers=loadbalancers,
min_entities=min_entities, max_entities=max_entities,
name=name, networks=networks, server_name=server_name,
state=state)
state=state, config_drive=config_drive, user_data=user_data)
# import module snippets

cloud/vmware/__init__.py Normal file
View file

View file

@ -38,7 +38,7 @@ options:
description:
- The virtual server name you wish to manage.
required: true
user:
username:
description:
- Username to connect to vcenter as.
required: true
@ -65,9 +65,20 @@ options:
default: null
state:
description:
- Indicate desired state of the vm.
- Indicate desired state of the vm.
default: present
choices: ['present', 'powered_on', 'absent', 'powered_off', 'restarted', 'reconfigured']
choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured']
from_template:
version_added: "1.9"
description:
- Specifies if the VM should be deployed from a template (cannot be used together with I(state))
default: no
choices: ['yes', 'no']
template_src:
version_added: "1.9"
description:
- Name of the source template to deploy from
default: None
vm_disk:
description:
- A key, value list of disks and their sizes and which datastore to keep it in.
@ -181,6 +192,18 @@ EXAMPLES = '''
datacenter: MyDatacenter
hostname: esx001.mydomain.local
# Deploy a guest from a template
# No reconfiguration of the destination guest is done at this stage; a reconfigure would be needed to adjust memory/cpu, etc.
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
from_template: yes
template_src: centosTemplate
cluster: MainCluster
resource_pool: "/Resources"
# Task to gather facts from a vSphere cluster only if the system is a VMWare guest
- vsphere_guest:
@ -192,12 +215,14 @@ EXAMPLES = '''
# Typical output of a vsphere_facts run on a guest
# If VMware Tools is not installed, ipaddresses will return None
- hw_eth0:
- addresstype: "assigned"
label: "Network adapter 1"
macaddress: "00:22:33:33:44:55"
macaddress_dash: "00-22-33-33-44-55"
ipaddresses: ['192.0.2.100', '2001:DB8:56ff:feac:4d8a']
summary: "VM Network"
hw_guest_full_name: "newvm001"
hw_guest_id: "rhel6_64Guest"
@ -207,7 +232,7 @@ EXAMPLES = '''
hw_product_uuid: "ef50bac8-2845-40ff-81d9-675315501dac"
# Remove a vm from vSphere
# The VM must be powered_off of you need to use force to force a shutdown
# The VM must be powered_off or you need to use force to force a shutdown
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
@ -488,6 +513,49 @@ def vmdisk_id(vm, current_datastore_name):
return id_list
def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name):
vmTemplate = vsphere_client.get_vm_by_name(template_src)
vmTarget = None
try:
cluster = [k for k,
v in vsphere_client.get_clusters().items() if v == cluster_name][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Cluster named: %s" %
cluster_name)
try:
rpmor = [k for k, v in vsphere_client.get_resource_pools(
from_mor=cluster).items()
if v == resource_pool][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Resource Pool named: %s" %
resource_pool)
try:
vmTarget = vsphere_client.get_vm_by_name(guest)
except Exception:
pass
if not vmTemplate.properties.config.template:
module.fail_json(
msg="Target %s is not a registered template" % template_src
)
try:
if vmTarget:
changed = False
else:
vmTemplate.clone(guest, resourcepool=rpmor)
changed = True
vsphere_client.disconnect()
module.exit_json(changed=changed)
except Exception as e:
module.fail_json(
msg="Could not clone selected machine: %s" % e
)
def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force):
spec = None
changed = False
@ -618,7 +686,16 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
hfmor = dcprops.hostFolder._obj
# virtualmachineFolder managed object reference
vmfmor = dcprops.vmFolder._obj
if vm_extra_config.get('folder'):
if vm_extra_config['folder'] not in vsphere_client._get_managed_objects(MORTypes.Folder).values():
vsphere_client.disconnect()
module.fail_json(msg="Cannot find folder named: %s" % vm_extra_config['folder'])
for mor, name in vsphere_client._get_managed_objects(MORTypes.Folder).iteritems():
if name == vm_extra_config['folder']:
vmfmor = mor
else:
vmfmor = dcprops.vmFolder._obj
# networkFolder managed object reference
nfmor = dcprops.networkFolder._obj
@ -936,6 +1013,11 @@ def gather_facts(vm):
'hw_processor_count': vm.properties.config.hardware.numCPU,
'hw_memtotal_mb': vm.properties.config.hardware.memoryMB,
}
netInfo = vm.get_property('net')
netDict = {}
if netInfo:
for net in netInfo:
netDict[net['mac_address']] = net['ip_addresses']
ifidx = 0
for entry in vm.properties.config.hardware.device:
@ -948,6 +1030,7 @@ def gather_facts(vm):
'addresstype': entry.addressType,
'label': entry.deviceInfo.label,
'macaddress': entry.macAddress,
'ipaddresses': netDict.get(entry.macAddress, None),
'macaddress_dash': entry.macAddress.replace(':', '-'),
'summary': entry.deviceInfo.summary,
}
@ -1066,6 +1149,8 @@ def main():
],
default='present'),
vmware_guest_facts=dict(required=False, choices=BOOLEANS),
from_template=dict(required=False, choices=BOOLEANS),
template_src=dict(required=False, type='str'),
guest=dict(required=True, type='str'),
vm_disk=dict(required=False, type='dict', default={}),
vm_nic=dict(required=False, type='dict', default={}),
@ -1080,7 +1165,7 @@ def main():
),
supports_check_mode=False,
mutually_exclusive=[['state', 'vmware_guest_facts']],
mutually_exclusive=[['state', 'vmware_guest_facts'],['state', 'from_template']],
required_together=[
['state', 'force'],
[
@ -1090,7 +1175,8 @@ def main():
'vm_hardware',
'esxi'
],
['resource_pool', 'cluster']
['resource_pool', 'cluster'],
['from_template', 'resource_pool', 'template_src']
],
)
@ -1112,6 +1198,8 @@ def main():
esxi = module.params['esxi']
resource_pool = module.params['resource_pool']
cluster = module.params['cluster']
template_src = module.params['template_src']
from_template = module.params['from_template']
# CONNECT TO THE SERVER
viserver = VIServer()
@ -1135,7 +1223,6 @@ def main():
except Exception, e:
module.fail_json(
msg="Fact gather failed with exception %s" % e)
# Power Changes
elif state in ['powered_on', 'powered_off', 'restarted']:
state_result = power_state(vm, state, force)
@ -1183,6 +1270,17 @@ def main():
module.fail_json(
msg="No such VM %s. Fact gathering requires an existing vm"
% guest)
elif from_template:
deploy_template(
vsphere_client=viserver,
esxi=esxi,
resource_pool=resource_pool,
guest=guest,
template_src=template_src,
module=module,
cluster_name=cluster
)
if state in ['restarted', 'reconfigured']:
module.fail_json(
msg="No such VM %s. States ["

View file

@ -18,6 +18,7 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import copy
import sys
import datetime
import traceback
@ -99,12 +100,22 @@ EXAMPLES = '''
creates: /path/to/database
'''
# Dict of options and their defaults
OPTIONS = {'chdir': None,
'creates': None,
'executable': None,
'NO_LOG': None,
'removes': None,
'warn': True,
}
# This is a pretty complex regex, which functions as follows:
#
# 1. (^|\s)
# ^ look for a space or the beginning of the line
# 2. (creates|removes|chdir|executable|NO_LOG)=
# ^ look for a valid param, followed by an '='
# 2. ({options_list})=
# ^ expanded to (chdir|creates|executable...)=
# look for a valid param, followed by an '='
# 3. (?P<quote>[\'"])?
# ^ look for an optional quote character, which can either be
# a single or double quote character, and store it for later
@ -114,8 +125,12 @@ EXAMPLES = '''
# ^ a non-escaped space or a non-escaped quote of the same kind
# that was matched in the first 'quote' is found, or the end of
# the line is reached
PARAM_REGEX = re.compile(r'(^|\s)(creates|removes|chdir|executable|NO_LOG|warn)=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)')
OPTIONS_REGEX = '|'.join(OPTIONS.keys())
PARAM_REGEX = re.compile(
r'(^|\s)({options_regex})=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)'.format(
options_regex=OPTIONS_REGEX
)
)
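A standalone sketch of what this regex extracts from a raw argument string (the command line is hypothetical):

    import re

    OPTIONS_REGEX = 'chdir|creates|executable|NO_LOG|removes|warn'
    PARAM_REGEX = re.compile(
        r'(^|\s)(' + OPTIONS_REGEX + r')=(?P<quote>[\'"])?(.*?)'
        r'(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)'
    )

    line = 'creates="/path/to/db" /usr/bin/make_database.sh db_user db_name'
    for m in PARAM_REGEX.finditer(line):
        print(m.group(2), '=>', m.group(4))  # creates => /path/to/db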
def check_command(commandline):
@ -148,7 +163,7 @@ def main():
args = module.params['args']
creates = module.params['creates']
removes = module.params['removes']
warn = module.params.get('warn', True)
warn = module.params['warn']
if args.strip() == '':
module.fail_json(rc=256, msg="no command given")
@ -232,13 +247,8 @@ class CommandModule(AnsibleModule):
def _load_params(self):
''' read the input and return a dictionary and the arguments string '''
args = MODULE_ARGS
params = {}
params['chdir'] = None
params['creates'] = None
params['removes'] = None
params['shell'] = False
params['executable'] = None
params['warn'] = True
params = copy.copy(OPTIONS)
params['shell'] = False
if "#USE_SHELL" in args:
args = args.replace("#USE_SHELL", "")
params['shell'] = True
@ -250,13 +260,8 @@ class CommandModule(AnsibleModule):
if '=' in x and not quoted:
# check to see if this is a special parameter for the command
k, v = x.split('=', 1)
v = unquote(v)
# because we're not breaking out quotes in the shlex split
# above, the value of the k=v pair may still be quoted. If
# so, remove them.
if len(v) > 1 and (v.startswith('"') and v.endswith('"') or v.startswith("'") and v.endswith("'")):
v = v[1:-1]
if k in ('creates', 'removes', 'chdir', 'executable', 'NO_LOG'):
v = unquote(v.strip())
if k in OPTIONS.keys():
if k == "chdir":
v = os.path.abspath(os.path.expanduser(v))
if not (os.path.exists(v) and os.path.isdir(v)):

View file

View file

@ -118,7 +118,7 @@ def db_exists(cursor, db):
return bool(res)
def db_delete(cursor, db):
query = "DROP DATABASE `%s`" % db
query = "DROP DATABASE %s" % mysql_quote_identifier(db, 'database')
cursor.execute(query)
return True
@ -190,12 +190,14 @@ def db_import(module, host, user, password, db_name, target, port, socket=None):
return rc, stdout, stderr
def db_create(cursor, db, encoding, collation):
query_params = dict(enc=encoding, collate=collation)
query = ['CREATE DATABASE %s' % mysql_quote_identifier(db, 'database')]
if encoding:
encoding = " CHARACTER SET %s" % encoding
query.append("CHARACTER SET %(enc)s")
if collation:
collation = " COLLATE %s" % collation
query = "CREATE DATABASE `%s`%s%s" % (db, encoding, collation)
res = cursor.execute(query)
query.append("COLLATE %(collate)s")
query = ' '.join(query)
res = cursor.execute(query, query_params)
return True
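With both options supplied, the assembled statement keeps the identifier quoting on our side and passes the values as bind parameters; a sketch of the result (hypothetical values):

    query_fragments = ['CREATE DATABASE `mydb`']  # identifier already quoted
    query_fragments.append('CHARACTER SET %(enc)s')
    query_fragments.append('COLLATE %(collate)s')
    query = ' '.join(query_fragments)
    params = dict(enc='utf8', collate='utf8_general_ci')
    assert query == 'CREATE DATABASE `mydb` CHARACTER SET %(enc)s COLLATE %(collate)s'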
def strip_quotes(s):
@ -360,4 +362,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
from ansible.module_utils.database import *
if __name__ == '__main__':
main()

View file

@ -117,6 +117,9 @@ EXAMPLES = """
# Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION'
- mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present
# Modify user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
- mysql_user: name=bob append=true priv=*.*:REQUIRESSL state=present
# Ensure no user named 'sally' exists, also passing in the auth credentials.
- mysql_user: login_user=root login_password=123456 name=sally state=absent
@ -151,6 +154,19 @@ except ImportError:
else:
mysqldb_found = True
VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION',
'LOCK TABLES', 'REFERENCES', 'EVENT', 'ALTER',
'DELETE', 'INDEX', 'INSERT', 'SELECT', 'UPDATE',
'CREATE TEMPORARY TABLES', 'TRIGGER', 'CREATE VIEW',
'SHOW VIEW', 'ALTER ROUTINE', 'CREATE ROUTINE',
'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER',
'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT',
'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN',
'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL'))
class InvalidPrivsError(Exception):
pass
# ===========================================
# MySQL module specific support methods.
#
@ -171,7 +187,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs):
changed = False
grant_option = False
# Handle passwords.
# Handle passwords
if password is not None:
cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host))
current_pass_hash = cursor.fetchone()
@ -181,7 +197,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs):
cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password))
changed = True
# Handle privileges.
# Handle privileges
if new_priv is not None:
curr_priv = privileges_get(cursor, user,host)
@ -217,7 +233,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs):
return changed
def user_delete(cursor, user, host):
cursor.execute("DROP USER %s@%s", (user,host))
cursor.execute("DROP USER %s@%s", (user, host))
return True
def privileges_get(cursor, user,host):
@ -231,7 +247,7 @@ def privileges_get(cursor, user,host):
The dictionary format is the same as that returned by privileges_unpack() below.
"""
output = {}
cursor.execute("SHOW GRANTS FOR %s@%s", (user,host))
cursor.execute("SHOW GRANTS FOR %s@%s", (user, host))
grants = cursor.fetchall()
def pick(x):
@ -243,11 +259,13 @@ def privileges_get(cursor, user,host):
for grant in grants:
res = re.match("GRANT (.+) ON (.+) TO '.+'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0])
if res is None:
module.fail_json(msg="unable to parse the MySQL grant string")
raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0])
privileges = res.group(1).split(", ")
privileges = [ pick(x) for x in privileges]
if "WITH GRANT OPTION" in res.group(4):
privileges.append('GRANT')
if "REQUIRE SSL" in res.group(4):
privileges.append('REQUIRESSL')
db = res.group(2)
output[db] = privileges
return output
@ -264,8 +282,8 @@ def privileges_unpack(priv):
not specified in the string, as MySQL will always provide this by default.
"""
output = {}
for item in priv.split('/'):
pieces = item.split(':')
for item in priv.strip().split('/'):
pieces = item.strip().split(':')
if '.' in pieces[0]:
pieces[0] = pieces[0].split('.')
for idx, piece in enumerate(pieces):
@ -274,27 +292,46 @@ def privileges_unpack(priv):
pieces[0] = '.'.join(pieces[0])
output[pieces[0]] = pieces[1].upper().split(',')
new_privs = frozenset(output[pieces[0]])
if not new_privs.issubset(VALID_PRIVS):
raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS))
if '*.*' not in output:
output['*.*'] = ['USAGE']
# if we are only specifying something like REQUIRESSL in *.* we still need
# to add USAGE as a privilege to avoid syntax errors
if priv.find('REQUIRESSL') != -1 and 'USAGE' not in output['*.*']:
output['*.*'].append('USAGE')
return output
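For example, a priv string covering two databases unpacks roughly as follows; a simplified standalone sketch (no validation or identifier quoting):

    def unpack_sketch(priv):
        out = {}
        for item in priv.strip().split('/'):
            obj, grants = item.strip().split(':')
            out[obj] = grants.upper().split(',')
        out.setdefault('*.*', ['USAGE'])  # USAGE on *.* is filled in by default
        return out

    assert unpack_sketch('mydb.*:insert,update/otherdb.*:select') == {
        'mydb.*': ['INSERT', 'UPDATE'], 'otherdb.*': ['SELECT'], '*.*': ['USAGE']}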
def privileges_revoke(cursor, user,host,db_table,grant_option):
# Escape '%' since mysql db.execute() uses a format string
db_table = db_table.replace('%', '%%')
if grant_option:
query = "REVOKE GRANT OPTION ON %s FROM '%s'@'%s'" % (db_table,user,host)
cursor.execute(query)
query = "REVOKE ALL PRIVILEGES ON %s FROM '%s'@'%s'" % (db_table,user,host)
cursor.execute(query)
query = ["REVOKE GRANT OPTION ON %s" % mysql_quote_identifier(db_table, 'table')]
query.append("FROM %s@%s")
query = ' '.join(query)
cursor.execute(query, (user, host))
query = ["REVOKE ALL PRIVILEGES ON %s" % mysql_quote_identifier(db_table, 'table')]
query.append("FROM %s@%s")
query = ' '.join(query)
cursor.execute(query, (user, host))
def privileges_grant(cursor, user,host,db_table,priv):
priv_string = ",".join(filter(lambda x: x != 'GRANT', priv))
query = "GRANT %s ON %s TO '%s'@'%s'" % (priv_string,db_table,user,host)
# Escape '%' since mysql db.execute uses a format string and the
# specification of db and table often use a % (SQL wildcard)
db_table = db_table.replace('%', '%%')
priv_string = ",".join(filter(lambda x: x not in [ 'GRANT', 'REQUIRESSL' ], priv))
query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))]
query.append("TO %s@%s")
if 'GRANT' in priv:
query = query + " WITH GRANT OPTION"
cursor.execute(query)
query.append("WITH GRANT OPTION")
if 'REQUIRESSL' in priv:
query.append("REQUIRE SSL")
query = ' '.join(query)
cursor.execute(query, (user, host))
def strip_quotes(s):
""" Remove surrounding single or double quotes
@ -425,8 +462,8 @@ def main():
if priv is not None:
try:
priv = privileges_unpack(priv)
except:
module.fail_json(msg="invalid privileges string")
except Exception, e:
module.fail_json(msg="invalid privileges string: %s" % str(e))
# Either the caller passes both a username and password with which to connect to
# mysql, or they pass neither and allow this module to read the credentials from
@ -459,11 +496,17 @@ def main():
if state == "present":
if user_exists(cursor, user, host):
changed = user_mod(cursor, user, host, password, priv, append_privs)
try:
changed = user_mod(cursor, user, host, password, priv, append_privs)
except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e:
module.fail_json(msg=str(e))
else:
if password is None:
module.fail_json(msg="password parameter required when adding a user")
changed = user_add(cursor, user, host, password, priv)
try:
changed = user_add(cursor, user, host, password, priv)
except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e:
module.fail_json(msg=str(e))
elif state == "absent":
if user_exists(cursor, user, host):
changed = user_delete(cursor, user, host)
@ -473,4 +516,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
from ansible.module_utils.database import *
if __name__ == '__main__':
main()

View file

@ -103,7 +103,7 @@ def typedvalue(value):
def getvariable(cursor, mysqlvar):
cursor.execute("SHOW VARIABLES LIKE '" + mysqlvar + "'")
cursor.execute("SHOW VARIABLES LIKE %s", (mysqlvar,))
mysqlvar_val = cursor.fetchall()
return mysqlvar_val
@ -116,8 +116,11 @@ def setvariable(cursor, mysqlvar, value):
should be passed as numeric literals.
"""
query = ["SET GLOBAL %s" % mysql_quote_identifier(mysqlvar, 'vars') ]
query.append(" = %s")
query = ' '.join(query)
try:
cursor.execute("SET GLOBAL " + mysqlvar + " = %s", (value,))
cursor.execute(query, (value,))
cursor.fetchall()
result = True
except Exception, e:
@ -242,7 +245,10 @@ def main():
value_actual = typedvalue(mysqlvar_val[0][1])
if value_wanted == value_actual:
module.exit_json(msg="Variable already set to requested value", changed=False)
result = setvariable(cursor, mysqlvar, value_wanted)
try:
result = setvariable(cursor, mysqlvar, value_wanted)
except SQLParseError, e:
result = str(e)
if result is True:
module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True)
else:
@ -250,4 +256,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
main()

View file

View file

@ -44,6 +44,11 @@ options:
- Host running the database
required: false
default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections
required: false
default: null
owner:
description:
- Name of the role to set as owner of the database
@ -124,7 +129,9 @@ class NotSupportedError(Exception):
#
def set_owner(cursor, db, owner):
query = "ALTER DATABASE \"%s\" OWNER TO \"%s\"" % (db, owner)
query = "ALTER DATABASE %s OWNER TO %s" % (
pg_quote_identifier(db, 'database'),
pg_quote_identifier(owner, 'role'))
cursor.execute(query)
return True
@ -141,7 +148,7 @@ def get_db_info(cursor, db):
FROM pg_database JOIN pg_roles ON pg_roles.oid = pg_database.datdba
WHERE datname = %(db)s
"""
cursor.execute(query, {'db':db})
cursor.execute(query, {'db': db})
return cursor.fetchone()
def db_exists(cursor, db):
@ -151,32 +158,32 @@ def db_exists(cursor, db):
def db_delete(cursor, db):
if db_exists(cursor, db):
query = "DROP DATABASE \"%s\"" % db
query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database')
cursor.execute(query)
return True
else:
return False
def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype):
params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype)
if not db_exists(cursor, db):
query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')]
if owner:
owner = " OWNER \"%s\"" % owner
query_fragments.append('OWNER %s' % pg_quote_identifier(owner, 'role'))
if template:
template = " TEMPLATE \"%s\"" % template
query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database'))
if encoding:
encoding = " ENCODING '%s'" % encoding
query_fragments.append('ENCODING %(enc)s')
if lc_collate:
lc_collate = " LC_COLLATE '%s'" % lc_collate
query_fragments.append('LC_COLLATE %(collate)s')
if lc_ctype:
lc_ctype = " LC_CTYPE '%s'" % lc_ctype
query = 'CREATE DATABASE "%s"%s%s%s%s%s' % (db, owner,
template, encoding,
lc_collate, lc_ctype)
cursor.execute(query)
query_fragments.append('LC_CTYPE %(ctype)s')
query = ' '.join(query_fragments)
cursor.execute(query, params)
return True
else:
db_info = get_db_info(cursor, db)
if (encoding and
if (encoding and
get_encoding_id(cursor, encoding) != db_info['encoding_id']):
raise NotSupportedError(
'Changing database encoding is not supported. '
@ -202,7 +209,7 @@ def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype):
return False
else:
db_info = get_db_info(cursor, db)
if (encoding and
if (encoding and
get_encoding_id(cursor, encoding) != db_info['encoding_id']):
return False
elif lc_collate and lc_collate != db_info['lc_collate']:
@ -224,6 +231,7 @@ def main():
login_user=dict(default="postgres"),
login_password=dict(default=""),
login_host=dict(default=""),
login_unix_socket=dict(default=""),
port=dict(default="5432"),
db=dict(required=True, aliases=['name']),
owner=dict(default=""),
@ -249,7 +257,7 @@ def main():
state = module.params["state"]
changed = False
# To use defaults values, keyword arguments must be absent, so
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
@ -258,8 +266,14 @@ def main():
"login_password":"password",
"port":"port"
}
kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
if k in params_map and v != '' )
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
try:
db_connection = psycopg2.connect(database="template1", **kw)
# Enable autocommit so we can create databases
@ -284,13 +298,22 @@ def main():
module.exit_json(changed=changed,db=db)
if state == "absent":
changed = db_delete(cursor, db)
try:
changed = db_delete(cursor, db)
except SQLParseError, e:
module.fail_json(msg=str(e))
elif state == "present":
changed = db_create(cursor, db, owner, template, encoding,
try:
changed = db_create(cursor, db, owner, template, encoding,
lc_collate, lc_ctype)
except SQLParseError, e:
module.fail_json(msg=str(e))
except NotSupportedError, e:
module.fail_json(msg=str(e))
except SystemExit:
# Avoid catching this on Python 2.4
raise
except Exception, e:
module.fail_json(msg="Database query failed: %s" % e)
@ -298,4 +321,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
from ansible.module_utils.database import *
if __name__ == '__main__':
main()

View file

@ -29,7 +29,7 @@ description:
options:
database:
description:
- Name of database to connect to.
- Name of database to connect to.
- 'Alias: I(db)'
required: yes
state:
@ -53,7 +53,7 @@ options:
schema, language, tablespace, group]
objs:
description:
- Comma separated list of database objects to set privileges on.
- Comma separated list of database objects to set privileges on.
- If I(type) is C(table) or C(sequence), the special value
C(ALL_IN_SCHEMA) can be provided instead to specify all database
objects of type I(type) in the schema specified via I(schema). (This
@ -99,6 +99,12 @@ options:
- Database port to connect to.
required: no
default: 5432
unix_socket:
description:
- Path to a Unix domain socket for local connections.
- 'Alias: I(login_unix_socket)'
required: false
default: null
login:
description:
- The username to authenticate with.
@ -135,7 +141,7 @@ author: Bernhard Weitzhofer
EXAMPLES = """
# On database "library":
# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors
# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors
# TO librarian, reader WITH GRANT OPTION
- postgresql_privs: >
database=library
@ -155,8 +161,8 @@ EXAMPLES = """
roles=librarian,reader
grant_option=yes
# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader
# Note that role "reader" will be *granted* INSERT privilege itself if this
# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader
# Note that role "reader" will be *granted* INSERT privilege itself if this
# isn't already the case (since state=present).
- postgresql_privs: >
db=library
@ -214,7 +220,7 @@ EXAMPLES = """
role=librarian
# GRANT ALL PRIVILEGES ON DATABASE library TO librarian
# If objs is omitted for type "database", it defaults to the database
# If objs is omitted for type "database", it defaults to the database
# to which the connection is established
- postgresql_privs: >
db=library
@ -230,6 +236,9 @@ except ImportError:
psycopg2 = None
VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
class Error(Exception):
pass
@ -264,6 +273,12 @@ class Connection(object):
}
kw = dict( (params_map[k], getattr(params, k)) for k in params_map
if getattr(params, k) != '' )
# If a unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and params.unix_socket != "":
kw["host"] = params.unix_socket
self.connection = psycopg2.connect(**kw)
self.cursor = self.connection.cursor()
@ -386,9 +401,9 @@ class Connection(object):
def get_group_memberships(self, groups):
query = """SELECT roleid, grantor, member, admin_option
FROM pg_catalog.pg_auth_members am
FROM pg_catalog.pg_auth_members am
JOIN pg_catalog.pg_roles r ON r.oid = am.roleid
WHERE r.rolname = ANY(%s)
WHERE r.rolname = ANY(%s)
ORDER BY roleid, grantor, member"""
self.cursor.execute(query, (groups,))
return self.cursor.fetchall()
@ -402,14 +417,14 @@ class Connection(object):
:param obj_type: Type of database object to grant/revoke
privileges for.
:param privs: Either a list of privileges to grant/revoke
:param privs: Either a list of privileges to grant/revoke
or None if type is "group".
:param objs: List of database objects to grant/revoke
privileges for.
:param roles: Either a list of role names or "PUBLIC"
for the implicitly defined "PUBLIC" group
:param state: "present" to grant privileges, "absent" to revoke.
:param grant_option: Only for state "present": If True, set
grant/admin option. If False, revoke it.
If None, don't change grant option.
:param schema_qualifier: Some object types ("TABLE", "SEQUENCE",
@ -454,19 +469,21 @@ class Connection(object):
else:
obj_ids = ['"%s"' % o for o in objs]
# set_what: SQL-fragment specifying what to set for the target roless:
# Either group membership or privileges on objects of a certain type.
# set_what: SQL-fragment specifying what to set for the target roles:
# Either group membership or privileges on objects of a certain type
if obj_type == 'group':
set_what = ','.join(obj_ids)
set_what = ','.join(pg_quote_identifier(i, 'role') for i in obj_ids)
else:
set_what = '%s ON %s %s' % (','.join(privs), obj_type,
','.join(obj_ids))
# Note: obj_type has been checked against a set of string literals
# and privs was escaped when it was parsed
set_what = '%s ON %s %s' % (','.join(privs), obj_type,
','.join(pg_quote_identifier(i, 'table') for i in obj_ids))
# for_whom: SQL-fragment specifying for whom to set the above
if roles == 'PUBLIC':
for_whom = 'PUBLIC'
else:
for_whom = ','.join(['"%s"' % r for r in roles])
for_whom = ','.join(pg_quote_identifier(r, 'role') for r in roles)
status_before = get_status(objs)
if state == 'present':
@ -476,7 +493,7 @@ class Connection(object):
else:
query = 'GRANT %s TO %s WITH GRANT OPTION'
else:
query = 'GRANT %s TO %s'
self.cursor.execute(query % (set_what, for_whom))
# Only revoke GRANT/ADMIN OPTION if grant_option actually is False.
@ -487,7 +504,7 @@ class Connection(object):
query = 'REVOKE GRANT OPTION FOR %s FROM %s'
self.cursor.execute(query % (set_what, for_whom))
else:
query = 'REVOKE %s FROM %s'
self.cursor.execute(query % (set_what, for_whom))
status_after = get_status(objs)
return status_before != status_after
@ -511,10 +528,11 @@ def main():
objs=dict(required=False, aliases=['obj']),
schema=dict(required=False),
roles=dict(required=True, aliases=['role']),
grant_option=dict(required=False, type='bool',
aliases=['admin_option']),
host=dict(default='', aliases=['login_host']),
port=dict(type='int', default=5432),
unix_socket=dict(default='', aliases=['login_unix_socket']),
login=dict(default='postgres', aliases=['login_user']),
password=dict(default='', aliases=['login_password'])
),
@ -558,7 +576,9 @@ def main():
try:
# privs
if p.privs:
privs = p.privs.split(',')
privs = frozenset(pr.upper() for pr in p.privs.split(','))
if not privs.issubset(VALID_PRIVS):
module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS))
else:
privs = None
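The replacement above normalizes the requested privileges before any SQL is built, so bad input fails fast instead of reaching the server. A runnable sketch of the same pattern (VALID_PRIVS abbreviated here; the module's full set appears earlier in this file):

VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'ALL'))

def parse_priv_list(raw):
    """Upper-case, strip, and validate a comma-separated privilege string."""
    privs = frozenset(p.strip().upper() for p in raw.split(',') if p.strip())
    invalid = privs.difference(VALID_PRIVS)
    if invalid:
        raise ValueError('Invalid privileges specified: %s' % ', '.join(sorted(invalid)))
    return privs

print(sorted(parse_priv_list('select, insert')))  # ['INSERT', 'SELECT']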
@ -610,4 +630,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
from ansible.module_utils.database import *
if __name__ == '__main__':
main()

View file

@ -78,6 +78,11 @@ options:
- Host running PostgreSQL.
required: false
default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections
required: false
default: null
priv:
description:
- "PostgreSQL privileges string in the format: C(table:priv1,priv2)"
@ -145,6 +150,7 @@ INSERT,UPDATE/table:SELECT/anothertable:ALL
'''
import re
import itertools
try:
import psycopg2
@ -153,6 +159,19 @@ except ImportError:
else:
postgresqldb_found = True
_flags = ('SUPERUSER', 'CREATEROLE', 'CREATEUSER', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' % f for f in _flags)))
VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL', 'USAGE')),
database=frozenset(('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL', 'USAGE')),
)
class InvalidFlagsError(Exception):
pass
class InvalidPrivsError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
@ -167,17 +186,18 @@ def user_exists(cursor, user):
return cursor.rowcount > 0
def user_add(cursor, user, password, role_attr_flags, encrypted, expires):
"""Create a new database user (role)."""
query_password_data = dict()
query = 'CREATE USER "%(user)s"' % { "user": user}
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal
query_password_data = dict(password=password, expires=expires)
query = ['CREATE USER %(user)s' % { "user": pg_quote_identifier(user, 'role')}]
if password is not None:
query = query + " WITH %(crypt)s" % { "crypt": encrypted }
query = query + " PASSWORD %(password)s"
query_password_data.update(password=password)
query.append("WITH %(crypt)s" % { "crypt": encrypted })
query.append("PASSWORD %(password)s")
if expires is not None:
query = query + " VALID UNTIL '%(expires)s'" % { "expires": expires }
query = query + " " + role_attr_flags
query.append("VALID UNTIL %(expires)s")
query.append(role_attr_flags)
query = ' '.join(query)
cursor.execute(query, query_password_data)
return True
@ -185,6 +205,7 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir
"""Change user password and/or attributes. Return True if changed, False otherwise."""
changed = False
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal
if user == 'PUBLIC':
if password is not None:
module.fail_json(msg="cannot change the password for PUBLIC user")
@ -196,25 +217,24 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir
# Handle passwords.
if password is not None or role_attr_flags is not None:
# Select password and all flag-like columns in order to verify changes.
query_password_data = dict()
query_password_data = dict(password=password, expires=expires)
select = "SELECT * FROM pg_authid where rolname=%(user)s"
cursor.execute(select, {"user": user})
# Grab current role attributes.
current_role_attrs = cursor.fetchone()
alter = 'ALTER USER "%(user)s"' % {"user": user}
alter = ['ALTER USER %(user)s' % {"user": pg_quote_identifier(user, 'role')}]
if password is not None:
query_password_data.update(password=password)
alter = alter + " WITH %(crypt)s" % {"crypt": encrypted}
alter = alter + " PASSWORD %(password)s"
alter = alter + " %(flags)s" % {'flags': role_attr_flags}
alter.append("WITH %(crypt)s" % {"crypt": encrypted})
alter.append("PASSWORD %(password)s")
alter.append(role_attr_flags)
elif role_attr_flags:
alter = alter + ' WITH ' + role_attr_flags
alter.append('WITH %s' % role_attr_flags)
if expires is not None:
alter = alter + " VALID UNTIL '%(expires)s'" % { "exipres": expires }
alter.append("VALID UNTIL %(expires)s")
try:
cursor.execute(alter, query_password_data)
cursor.execute(' '.join(alter), query_password_data)
except psycopg2.InternalError, e:
if e.pgcode == '25006':
# Handle errors due to read-only transactions indicated by pgcode 25006
@ -240,7 +260,7 @@ def user_delete(cursor, user):
"""Try to remove a user. Returns True if successful otherwise False"""
cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
try:
cursor.execute("DROP USER \"%s\"" % user)
cursor.execute("DROP USER %s" % pg_quote_identifier(user, 'role'))
except:
cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
@ -264,36 +284,20 @@ def get_table_privileges(cursor, user, table):
cursor.execute(query, (user, table, schema))
return set([x[0] for x in cursor.fetchall()])
def quote_pg_identifier(identifier):
"""
quote postgresql identifiers involving zero or more namespaces
"""
if '"' in identifier:
# the user has supplied their own quoting. we have to hope they're
# doing it right. Maybe they have an unfortunately named table
# containing a period in the name, such as: "public"."users.2013"
return identifier
tokens = identifier.strip().split(".")
quoted_tokens = []
for token in tokens:
quoted_tokens.append('"%s"' % (token, ))
return ".".join(quoted_tokens)
def grant_table_privilege(cursor, user, table, priv):
# Note: priv escaped by parse_privs
prev_priv = get_table_privileges(cursor, user, table)
query = 'GRANT %s ON TABLE %s TO %s' % (
priv, quote_pg_identifier(table), quote_pg_identifier(user), )
priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
cursor.execute(query)
curr_priv = get_table_privileges(cursor, user, table)
return len(curr_priv) > len(prev_priv)
def revoke_table_privilege(cursor, user, table, priv):
# Note: priv escaped by parse_privs
prev_priv = get_table_privileges(cursor, user, table)
query = 'REVOKE %s ON TABLE %s FROM %s' % (
priv, quote_pg_identifier(table), quote_pg_identifier(user), )
priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
cursor.execute(query)
curr_priv = get_table_privileges(cursor, user, table)
return len(curr_priv) < len(prev_priv)
@ -324,21 +328,29 @@ def has_database_privilege(cursor, user, db, priv):
return cursor.fetchone()[0]
def grant_database_privilege(cursor, user, db, priv):
# Note: priv escaped by parse_privs
prev_priv = get_database_privileges(cursor, user, db)
if user == "PUBLIC":
query = 'GRANT %s ON DATABASE \"%s\" TO PUBLIC' % (priv, db)
query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
priv, pg_quote_identifier(db, 'database'))
else:
query = 'GRANT %s ON DATABASE \"%s\" TO \"%s\"' % (priv, db, user)
query = 'GRANT %s ON DATABASE %s TO %s' % (
priv, pg_quote_identifier(db, 'database'),
pg_quote_identifier(user, 'role'))
cursor.execute(query)
curr_priv = get_database_privileges(cursor, user, db)
return len(curr_priv) > len(prev_priv)
def revoke_database_privilege(cursor, user, db, priv):
# Note: priv escaped by parse_privs
prev_priv = get_database_privileges(cursor, user, db)
if user == "PUBLIC":
query = 'REVOKE %s ON DATABASE \"%s\" FROM PUBLIC' % (priv, db)
query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
priv, pg_quote_identifier(db, 'database'))
else:
query = 'REVOKE %s ON DATABASE \"%s\" FROM \"%s\"' % (priv, db, user)
query = 'REVOKE %s ON DATABASE %s FROM %s' % (
priv, pg_quote_identifier(db, 'database'),
pg_quote_identifier(user, 'role'))
cursor.execute(query)
curr_priv = get_database_privileges(cursor, user, db)
return len(curr_priv) < len(prev_priv)
@ -387,11 +399,20 @@ def parse_role_attrs(role_attr_flags):
Where:
attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
[ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB",
"[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ]
"""
if ',' not in role_attr_flags:
return role_attr_flags
flag_set = role_attr_flags.split(",")
o_flags = " ".join(flag_set)
if ',' in role_attr_flags:
flag_set = frozenset(r.upper() for r in role_attr_flags.split(","))
elif role_attr_flags:
flag_set = frozenset((role_attr_flags.upper(),))
else:
flag_set = frozenset()
if not flag_set.issubset(VALID_FLAGS):
raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
' '.join(flag_set.difference(VALID_FLAGS)))
o_flags = ' '.join(flag_set)
return o_flags
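parse_role_attrs now rejects unknown flags instead of passing arbitrary text into CREATE/ALTER USER. Because VALID_FLAGS is built with itertools.chain over the NO-prefixed variants, a quick standalone check behaves like this (flag list trimmed for brevity):

import itertools

_flags = ('SUPERUSER', 'CREATEDB', 'LOGIN')
VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' % f for f in _flags)))

def check_flags(role_attr_flags):
    flag_set = frozenset(f.upper() for f in role_attr_flags.split(',') if f)
    bad = flag_set.difference(VALID_FLAGS)
    if bad:
        raise ValueError('Invalid role_attr_flags specified: %s' % ' '.join(bad))
    return ' '.join(flag_set)

print(check_flags('CREATEDB,NOLOGIN'))  # e.g. "NOLOGIN CREATEDB" (set order unspecified)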
def parse_privs(privs, db):
@ -417,12 +438,15 @@ def parse_privs(privs, db):
if ':' not in token:
type_ = 'database'
name = db
priv_set = set(x.strip() for x in token.split(','))
priv_set = frozenset(x.strip().upper() for x in token.split(',') if x.strip())
else:
type_ = 'table'
name, privileges = token.split(':', 1)
priv_set = set(x.strip() for x in privileges.split(','))
priv_set = frozenset(x.strip().upper() for x in privileges.split(',') if x.strip())
if not priv_set.issubset(VALID_PRIVS[type_]):
raise InvalidPrivsError('Invalid privs specified for %s: %s' %
(type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
o_privs[type_][name] = priv_set
return o_privs
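For reference, the priv string format that parse_privs consumes is "dbprivs/table:tableprivs/...". A toy version (no validation, hypothetical helper name) shows the resulting structure:

def mini_parse_privs(privs, db):
    """Toy parse_privs: 'CONNECT/books:SELECT,INSERT' -> nested dict."""
    out = {'database': {}, 'table': {}}
    for token in privs.split('/'):
        if ':' not in token:
            type_, name, plist = 'database', db, token
        else:
            type_ = 'table'
            name, plist = token.split(':', 1)
        out[type_][name] = frozenset(
            p.strip().upper() for p in plist.split(',') if p.strip())
    return out

print(mini_parse_privs('CONNECT/books:select,insert', 'library'))
# roughly: {'database': {'library': frozenset(['CONNECT'])},
#           'table': {'books': frozenset(['SELECT', 'INSERT'])}}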
@ -437,6 +461,7 @@ def main():
login_user=dict(default="postgres"),
login_password=dict(default=""),
login_host=dict(default=""),
login_unix_socket=dict(default=""),
user=dict(required=True, aliases=['name']),
password=dict(default=None),
state=dict(default="present", choices=["absent", "present"]),
@ -460,7 +485,10 @@ def main():
module.fail_json(msg="privileges require a database to be specified")
privs = parse_privs(module.params["priv"], db)
port = module.params["port"]
role_attr_flags = parse_role_attrs(module.params["role_attr_flags"])
try:
role_attr_flags = parse_role_attrs(module.params["role_attr_flags"])
except InvalidFlagsError, e:
module.fail_json(msg=str(e))
if module.params["encrypted"]:
encrypted = "ENCRYPTED"
else:
@ -482,6 +510,12 @@ def main():
}
kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
if k in params_map and v != "" )
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
try:
db_connection = psycopg2.connect(**kw)
cursor = db_connection.cursor()
@ -494,18 +528,30 @@ def main():
if state == "present":
if user_exists(cursor, user):
changed = user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires)
try:
changed = user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires)
except SQLParseError, e:
module.fail_json(msg=str(e))
else:
changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires)
changed = grant_privileges(cursor, user, privs) or changed
try:
changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires)
except SQLParseError, e:
module.fail_json(msg=str(e))
try:
changed = grant_privileges(cursor, user, privs) or changed
except SQLParseError, e:
module.fail_json(msg=str(e))
else:
if user_exists(cursor, user):
if module.check_mode:
changed = True
kw['user_removed'] = True
else:
changed = revoke_privileges(cursor, user, privs)
user_removed = user_delete(cursor, user)
try:
changed = revoke_privileges(cursor, user, privs)
user_removed = user_delete(cursor, user)
except SQLParseError, e:
module.fail_json(msg=str(e))
changed = changed or user_removed
if fail_on_user and not user_removed:
msg = "unable to remove user"
@ -523,4 +569,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
main()

View file

@ -153,8 +153,9 @@ def main():
)
changed = False
pathmd5 = None
destmd5 = None
path_md5 = None # Deprecated
path_hash = None
dest_hash = None
src = os.path.expanduser(module.params['src'])
dest = os.path.expanduser(module.params['dest'])
backup = module.params['backup']
@ -175,23 +176,29 @@ def main():
module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp))
path = assemble_from_fragments(src, delimiter, compiled_regexp)
pathmd5 = module.md5(path)
path_hash = module.sha1(path)
if os.path.exists(dest):
destmd5 = module.md5(dest)
dest_hash = module.sha1(dest)
if pathmd5 != destmd5:
if backup and destmd5 is not None:
if path_hash != dest_hash:
if backup and dest_hash is not None:
module.backup_local(dest)
shutil.copy(path, dest)
changed = True
# Backwards compat. This won't return data if FIPS mode is active
try:
path_md5 = module.md5(path)
except ValueError:
path_md5 = None
os.remove(path)
file_args = module.load_file_common_arguments(module.params)
changed = module.set_fs_attributes_if_different(file_args, changed)
# Mission complete
module.exit_json(src=src, dest=dest, md5sum=pathmd5, changed=changed, msg="OK")
module.exit_json(src=src, dest=dest, md5sum=path_md5, checksum=path_hash, changed=changed, msg="OK")
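The pattern above — sha1 drives change detection while md5 is computed only for the legacy md5sum return key and may legitimately be None — matters because hashlib.md5() raises ValueError on FIPS-140 hosts. A self-contained sketch of the guarded digest:

import hashlib

def safe_md5(path, blocksize=65536):
    """Return the md5 hex digest, or None where FIPS mode forbids MD5."""
    try:
        digest = hashlib.md5()
    except ValueError:   # FIPS-enabled OpenSSL refuses to construct MD5
        return None
    f = open(path, 'rb')
    try:
        block = f.read(blocksize)
        while block:
            digest.update(block)
            block = f.read(blocksize)
    finally:
        f.close()
    return digest.hexdigest()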
# import module snippets
from ansible.module_utils.basic import *

View file

@ -27,7 +27,7 @@ module: copy
version_added: "historical"
short_description: Copies files to remote locations.
description:
- The M(copy) module copies a file on the local box to remote locations.
- The M(copy) module copies a file on the local box to remote locations. Use the M(fetch) module to copy files from remote locations to the local box.
options:
src:
description:
@ -167,8 +167,13 @@ def main():
if not os.access(src, os.R_OK):
module.fail_json(msg="Source %s not readable" % (src))
md5sum_src = module.md5(src)
md5sum_dest = None
checksum_src = module.sha1(src)
checksum_dest = None
# Backwards compat only. This will be None in FIPS mode
try:
md5sum_src = module.md5(src)
except ValueError:
md5sum_src = None
changed = False
@ -176,7 +181,7 @@ def main():
if original_basename and dest.endswith("/"):
dest = os.path.join(dest, original_basename)
dirname = os.path.dirname(dest)
if not os.path.exists(dirname):
if not os.path.exists(dirname) and '/' in dirname:
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname)
os.makedirs(dirname)
directory_args = module.load_file_common_arguments(module.params)
@ -198,7 +203,7 @@ def main():
basename = original_basename
dest = os.path.join(dest, basename)
if os.access(dest, os.R_OK):
md5sum_dest = module.md5(dest)
checksum_dest = module.sha1(dest)
else:
if not os.path.exists(os.path.dirname(dest)):
try:
@ -215,7 +220,7 @@ def main():
module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest)))
backup_file = None
if md5sum_src != md5sum_dest or os.path.islink(dest):
if checksum_src != checksum_dest or os.path.islink(dest):
try:
if backup:
if os.path.exists(dest):
@ -238,7 +243,7 @@ def main():
changed = False
res_args = dict(
dest = dest, src = src, md5sum = md5sum_src, changed = changed
dest = dest, src = src, md5sum = md5sum_src, checksum = checksum_src, changed = changed
)
if backup_file:
res_args['backup_file'] = backup_file

View file

@ -34,13 +34,14 @@ options:
required: false
choices: [ "yes", "no" ]
default: "no"
validate_md5:
validate_checksum:
version_added: "1.4"
description:
- Verify that the source and destination md5sums match after the files are fetched.
- Verify that the source and destination checksums match after the files are fetched.
required: false
choices: [ "yes", "no" ]
default: "yes"
aliases: [ "validate_md5" ]
flat:
version_added: "1.2"
description:

View file

@ -103,6 +103,23 @@ EXAMPLES = '''
'''
def get_state(path):
''' Find out current state '''
if os.path.lexists(path):
if os.path.islink(path):
return 'link'
elif os.path.isdir(path):
return 'directory'
elif os.stat(path).st_nlink > 1:
return 'hard'
else:
# could be many other things, but defaulting to file
return 'file'
return 'absent'
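Factoring the state detection into get_state lets the follow logic below re-evaluate a path after resolving a symlink. Assuming the get_state defined above is in scope, a quick usage check looks like this (paths are illustrative):

import os

for p in ('/etc', '/etc/hosts', '/no/such/path'):
    print('%s -> %s' % (p, get_state(p)))
# typically: /etc -> directory, /etc/hosts -> file, /no/such/path -> absent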
def main():
module = AnsibleModule(
@ -143,18 +160,7 @@ def main():
pass
module.exit_json(path=path, changed=False, appears_binary=appears_binary)
# Find out current state
prev_state = 'absent'
if os.path.lexists(path):
if os.path.islink(path):
prev_state = 'link'
elif os.path.isdir(path):
prev_state = 'directory'
elif os.stat(path).st_nlink > 1:
prev_state = 'hard'
else:
# could be many other things, but defaulting to file
prev_state = 'file'
prev_state = get_state(path)
# state should default to file, but since that creates many conflicts,
# default to 'current' when it exists.
@ -168,22 +174,24 @@ def main():
# or copy module, even if this module never uses it, it is needed to key off some things
if src is not None:
src = os.path.expanduser(src)
# original_basename is used by other modules that depend on file.
if os.path.isdir(path) and state not in ["link", "absent"]:
if params['original_basename']:
basename = params['original_basename']
else:
basename = os.path.basename(src)
params['path'] = path = os.path.join(path, basename)
else:
if state in ['link','hard']:
if follow:
if follow and state == 'link':
# use the current target of the link as the source
src = os.readlink(path)
else:
module.fail_json(msg='src and dest are required for creating links')
# original_basename is used by other modules that depend on file.
if os.path.isdir(path) and state not in ["link", "absent"]:
basename = None
if params['original_basename']:
basename = params['original_basename']
elif src is not None:
basename = os.path.basename(src)
if basename:
params['path'] = path = os.path.join(path, basename)
# make sure the target path is a directory when we're doing a recursive operation
recurse = params['recurse']
if recurse and state != 'directory':
@ -210,7 +218,15 @@ def main():
module.exit_json(path=path, changed=False)
elif state == 'file':
if state != prev_state:
if follow and prev_state == 'link':
# follow symlink and operate on original
path = os.readlink(path)
prev_state = get_state(path)
file_args['path'] = path
if prev_state not in ['file','hard']:
# file is not absent and any other state is a conflict
module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state))
@ -218,6 +234,11 @@ def main():
module.exit_json(path=path, changed=changed)
elif state == 'directory':
if follow and prev_state == 'link':
path = os.readlink(path)
prev_state = get_state(path)
if prev_state == 'absent':
if module.check_mode:
module.exit_json(changed=True)
@ -238,6 +259,10 @@ def main():
tmp_file_args['path']=curpath
changed = module.set_fs_attributes_if_different(tmp_file_args, changed)
# We already know prev_state is not 'absent', therefore it exists in some form.
elif prev_state != 'directory':
module.fail_json(path=path, msg='%s already exists as a %s' % (path, prev_state))
changed = module.set_fs_attributes_if_different(file_args, changed)
if recurse:
@ -330,13 +355,13 @@ def main():
open(path, 'w').close()
except OSError, e:
module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e))
elif prev_state in ['file', 'directory']:
elif prev_state in ['file', 'directory', 'hard']:
try:
os.utime(path, None)
except OSError, e:
module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e))
else:
module.fail_json(msg='Cannot touch other than files and directories')
module.fail_json(msg='Cannot touch other than files, directories, and hardlinks (%s is %s)' % (path, prev_state))
try:
module.set_fs_attributes_if_different(file_args, True)
except SystemExit, e:

View file

@ -148,7 +148,7 @@ EXAMPLES = r"""
- lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\1Xms${xms}m\3' backrefs=yes
# Validate a the sudoers file before saving
# Validate the sudoers file before saving
- lineinfile: dest=/etc/sudoers state=present regexp='^%ADMIN ALL\=' line='%ADMIN ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s'
"""
@ -192,7 +192,7 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
if not create:
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
destpath = os.path.dirname(dest)
if not os.path.exists(destpath):
if not os.path.exists(destpath) and not module.check_mode:
os.makedirs(destpath)
lines = []
else:
@ -282,6 +282,9 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
backupdest = module.backup_local(dest)
write_changes(module, lines, dest)
if module.check_mode and not os.path.exists(dest):
module.exit_json(changed=changed, msg=msg, backup=backupdest)
msg, changed = check_file_attrs(module, changed, msg)
module.exit_json(changed=changed, msg=msg, backup=backupdest)
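The added guard matters because in check mode write_changes() never creates dest, so the subsequent attribute check would stat a missing file. A sketch of the tail of present() under that assumption (finish is a hypothetical wrapper; check_file_attrs is the module's own helper):

import os

def finish(module, dest, changed, msg, backupdest):
    # In check mode nothing was written; bail out before touching dest.
    if module.check_mode and not os.path.exists(dest):
        module.exit_json(changed=changed, msg=msg, backup=backupdest)
    msg, changed = check_file_attrs(module, changed, msg)
    module.exit_json(changed=changed, msg=msg, backup=backupdest)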

View file

@ -36,10 +36,17 @@ options:
aliases: []
get_md5:
description:
- Whether to return the md5 sum of the file
- Whether to return the md5 sum of the file. Will return None if we're unable to use md5 (Common for FIPS-140 compliant systems)
required: false
default: yes
aliases: []
get_checksum:
description:
- Whether to return a checksum of the file (currently sha1)
required: false
default: yes
aliases: []
version_added: "1.8"
author: Bruce Pennypacker
'''
@ -51,12 +58,12 @@ EXAMPLES = '''
- fail: msg="Whoops! file ownership has changed"
when: st.stat.pw_name != 'root'
# Determine if a path exists and is a directory. Note we need to test
# Determine if a path exists and is a directory. Note that we need to test
# both that p.stat.isdir actually exists, and also that it's set to true.
- stat: path=/path/to/something
register: p
- debug: msg="Path exists and is a directory"
when: p.stat.isdir is defined and p.stat.isdir == true
when: p.stat.isdir is defined and p.stat.isdir
# Don't do md5 checksum
- stat: path=/path/to/myhugefile get_md5=no
@ -66,13 +73,15 @@ import os
import sys
from stat import *
import pwd
import grp
def main():
module = AnsibleModule(
argument_spec = dict(
path = dict(required=True),
follow = dict(default='no', type='bool'),
get_md5 = dict(default='yes', type='bool')
get_md5 = dict(default='yes', type='bool'),
get_checksum = dict(default='yes', type='bool')
),
supports_check_mode = True
)
@ -81,6 +90,7 @@ def main():
path = os.path.expanduser(path)
follow = module.params.get('follow')
get_md5 = module.params.get('get_md5')
get_checksum = module.params.get('get_checksum')
try:
if follow:
@ -99,6 +109,7 @@ def main():
# back to ansible
d = {
'exists' : True,
'path' : path,
'mode' : "%04o" % S_IMODE(mode),
'isdir' : S_ISDIR(mode),
'ischr' : S_ISCHR(mode),
@ -133,13 +144,23 @@ def main():
d['lnk_source'] = os.path.realpath(path)
if S_ISREG(mode) and get_md5 and os.access(path,os.R_OK):
d['md5'] = module.md5(path)
# Will fail on FIPS-140 compliant systems
try:
d['md5'] = module.md5(path)
except ValueError:
d['md5'] = None
if S_ISREG(mode) and get_checksum and os.access(path,os.R_OK):
d['checksum'] = module.sha1(path)
try:
pw = pwd.getpwuid(st.st_uid)
d['pw_name'] = pw.pw_name
grp_info = grp.getgrgid(pw.pw_gid)
d['gr_name'] = grp_info.gr_name
except:
pass
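The new pwd/grp lookups translate numeric ids into names, and the bare except tolerates ids with no passwd/group entry (common in chroots and containers). Note the hunk resolves the group from the owner's primary gid (pw.pw_gid); a standalone equivalent using the file's own gid would read:

import os, pwd, grp

st = os.stat('/etc/hosts')
try:
    print(pwd.getpwuid(st.st_uid).pw_name)   # e.g. root
    print(grp.getgrgid(st.st_gid).gr_name)   # e.g. root
except KeyError:
    pass   # uid/gid missing from the passwd/group databases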

View file

@ -39,7 +39,7 @@ options:
version_added: "1.5"
mode:
description:
- Specify the direction of the synchroniztion. In push mode the localhost or delegate is the source; In pull mode the remote host in context is the source.
- Specify the direction of the synchronization. In push mode the localhost or delegate is the source; In pull mode the remote host in context is the source.
required: false
choices: [ 'push', 'pull' ]
default: 'push'
@ -145,15 +145,16 @@ options:
required: false
version_added: "1.6"
notes:
- rsync must be installed on both the local and remote machine.
- Inspect the verbose output to validate that the destination user/host/path
are what you expected.
- The remote user for the dest path will always be the remote_user, not
the sudo_user.
- Expect that dest=~/x will be ~<remote_user>/x even if using sudo.
- To exclude files and directories from being synchronized, you may add
C(.rsync-filter) files to the source directory.
author: Timothy Appnel
'''
@ -180,7 +181,9 @@ local_action: synchronize src=some/relative/path dest=/some/absolute/path
pull mode
synchronize: mode=pull src=some/relative/path dest=/some/absolute/path
# Synchronization of src on delegate host to dest on the current inventory host
# Synchronization of src on delegate host to dest on the current inventory host.
# If delegate_to is set to the current inventory host, this can be used to synchronize
# two directories on that host.
synchronize: >
src=some/relative/path dest=/some/absolute/path
delegate_to: delegate.host

View file

@ -76,18 +76,35 @@ EXAMPLES = '''
'''
import os
from zipfile import ZipFile
class UnarchiveError(Exception):
pass
# class to handle .zip files
class ZipFile(object):
class ZipArchive(object):
def __init__(self, src, dest, module):
self.src = src
self.dest = dest
self.module = module
self.cmd_path = self.module.get_bin_path('unzip')
self._files_in_archive = []
def is_unarchived(self):
@property
def files_in_archive(self, force_refresh=False):
if self._files_in_archive and not force_refresh:
return self._files_in_archive
archive = ZipFile(self.src)
try:
self._files_in_archive = archive.namelist()
except:
raise UnarchiveError('Unable to list files in the archive')
return self._files_in_archive
def is_unarchived(self, mode, owner, group):
return dict(unarchived=False)
def unarchive(self):
@ -106,19 +123,57 @@ class ZipFile(object):
# class to handle gzipped tar files
class TgzFile(object):
class TgzArchive(object):
def __init__(self, src, dest, module):
self.src = src
self.dest = dest
self.module = module
self.cmd_path = self.module.get_bin_path('tar')
self.zipflag = 'z'
self._files_in_archive = []
def is_unarchived(self):
cmd = '%s -v -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src)
@property
def files_in_archive(self, force_refresh=False):
if self._files_in_archive and not force_refresh:
return self._files_in_archive
cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src)
rc, out, err = self.module.run_command(cmd)
if rc != 0:
raise UnarchiveError('Unable to list files in the archive')
for filename in out.splitlines():
if filename:
self._files_in_archive.append(filename)
return self._files_in_archive
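files_in_archive shells out to tar -t and caches the result. The same listing can be produced with the stdlib tarfile module, which is a handy way to sanity-check the parsing (illustrative only, not how the module does it):

import tarfile

def list_tgz(path):
    """Return member names of a gzipped tarball via the stdlib."""
    archive = tarfile.open(path, 'r:gz')
    try:
        return [m.name for m in archive.getmembers()]
    finally:
        archive.close()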
def is_unarchived(self, mode, owner, group):
cmd = '%s -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src)
rc, out, err = self.module.run_command(cmd)
unarchived = (rc == 0)
if not unarchived:
# Check whether the differences are in something that we're
# setting anyway
# What will be set
to_be_set = set()
for perm in (('Mode', mode), ('Gid', group), ('Uid', owner)):
if perm[1] is not None:
to_be_set.add(perm[0])
# What is different
changes = set()
difference_re = re.compile(r': (.*) differs$')
for line in out.splitlines():
match = difference_re.search(line)
if not match:
# Unknown tar output. Assume we have changes
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
changes.add(match.groups()[0])
if changes and changes.issubset(to_be_set):
unarchived = True
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
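The key idea above: tar --diff reports one "<file>: <attr> differs" line per mismatch, and differences only in attributes (Mode/Uid/Gid) that this task is about to set anyway should not force a re-extract. A condensed, runnable version of that decision:

import re

def only_expected_diffs(tar_output, to_be_set):
    """True if every reported difference is an attribute we will set."""
    difference_re = re.compile(r': (.*) differs$')
    changes = set()
    for line in tar_output.splitlines():
        match = difference_re.search(line)
        if not match:
            return False   # unknown tar output: assume real changes
        changes.add(match.group(1))
    return bool(changes) and changes.issubset(to_be_set)

print(only_expected_diffs('./conf/app.ini: Mode differs', set(['Mode', 'Uid', 'Gid'])))  # True
print(only_expected_diffs('./conf/app.ini: Contents differ', set(['Mode'])))             # False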
def unarchive(self):
@ -129,47 +184,41 @@ class TgzFile(object):
def can_handle_archive(self):
if not self.cmd_path:
return False
cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src)
rc, out, err = self.module.run_command(cmd)
if rc == 0:
if len(out.splitlines(True)) > 0:
try:
if self.files_in_archive:
return True
except UnarchiveError:
pass
# Errors and no files in archive assume that we weren't able to
# properly unarchive it
return False
# class to handle tar files that aren't compressed
class TarFile(TgzFile):
class TarArchive(TgzArchive):
def __init__(self, src, dest, module):
self.src = src
self.dest = dest
self.module = module
self.cmd_path = self.module.get_bin_path('tar')
super(TarArchive, self).__init__(src, dest, module)
self.zipflag = ''
# class to handle bzip2 compressed tar files
class TarBzip(TgzFile):
class TarBzipArchive(TgzArchive):
def __init__(self, src, dest, module):
self.src = src
self.dest = dest
self.module = module
self.cmd_path = self.module.get_bin_path('tar')
super(TarBzipArchive, self).__init__(src, dest, module)
self.zipflag = 'j'
# class to handle xz compressed tar files
class TarXz(TgzFile):
class TarXzArchive(TgzArchive):
def __init__(self, src, dest, module):
self.src = src
self.dest = dest
self.module = module
self.cmd_path = self.module.get_bin_path('tar')
super(TarXzArchive, self).__init__(src, dest, module)
self.zipflag = 'J'
# try handlers in order and return the one that works or bail if none work
def pick_handler(src, dest, module):
handlers = [TgzFile, ZipFile, TarFile, TarBzip, TarXz]
handlers = [TgzArchive, ZipArchive, TarArchive, TarBzipArchive, TarXzArchive]
for handler in handlers:
obj = handler(src, dest, module)
if obj.can_handle_archive():
@ -193,7 +242,7 @@ def main():
src = os.path.expanduser(module.params['src'])
dest = os.path.expanduser(module.params['dest'])
copy = module.params['copy']
creates = module.params['creates']
file_args = module.load_file_common_arguments(module.params)
# did tar file arrive?
if not os.path.exists(src):
@ -204,20 +253,6 @@ def main():
if not os.access(src, os.R_OK):
module.fail_json(msg="Source '%s' not readable" % src)
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
v = os.path.expanduser(creates)
if os.path.exists(v):
module.exit_json(
stdout="skipped, since %s exists" % v,
skipped=True,
changed=False,
stderr=False,
rc=0
)
# is dest OK to receive tar file?
if not os.path.isdir(dest):
module.fail_json(msg="Destination '%s' is not a directory" % dest)
@ -229,23 +264,29 @@ def main():
res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)
# do we need to do unpack?
res_args['check_results'] = handler.is_unarchived()
res_args['check_results'] = handler.is_unarchived(file_args['mode'],
file_args['owner'], file_args['group'])
if res_args['check_results']['unarchived']:
res_args['changed'] = False
module.exit_json(**res_args)
else:
# do the unpack
try:
res_args['extract_results'] = handler.unarchive()
if res_args['extract_results']['rc'] != 0:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
except IOError:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest))
else:
res_args['changed'] = True
# do the unpack
try:
res_args['extract_results'] = handler.unarchive()
if res_args['extract_results']['rc'] != 0:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
except IOError:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest))
res_args['changed'] = True
# do we need to change perms?
for filename in handler.files_in_archive:
file_args['path'] = os.path.join(dest, filename)
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])
module.exit_json(**res_args)
# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()

View file

@ -154,7 +154,7 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10):
if info['status'] == 304:
module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''))
# create a temporary file and copy content to do md5-based replacement
# create a temporary file and copy content to do checksum-based replacement
if info['status'] != 200:
module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest)
@ -241,8 +241,8 @@ def main():
filename = url_filename(info['url'])
dest = os.path.join(dest, filename)
md5sum_src = None
md5sum_dest = None
checksum_src = None
checksum_dest = None
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
@ -251,7 +251,7 @@ def main():
if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc)
module.fail_json( msg="Source %s not readable" % (tmpsrc))
md5sum_src = module.md5(tmpsrc)
checksum_src = module.sha1(tmpsrc)
# check if there is no dest file
if os.path.exists(dest):
@ -262,13 +262,13 @@ def main():
if not os.access(dest, os.R_OK):
os.remove(tmpsrc)
module.fail_json( msg="Destination %s not readable" % (dest))
md5sum_dest = module.md5(dest)
checksum_dest = module.sha1(dest)
else:
if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc)
module.fail_json( msg="Destination %s not writable" % (os.path.dirname(dest)))
if md5sum_src != md5sum_dest:
if checksum_src != checksum_dest:
try:
shutil.copyfile(tmpsrc, dest)
except Exception, err:
@ -303,8 +303,15 @@ def main():
file_args['path'] = dest
changed = module.set_fs_attributes_if_different(file_args, changed)
# Backwards compat only. We'll return None on FIPS enabled systems
try:
md5sum = module.md5(dest)
except ValueError:
md5sum = None
# Mission complete
module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum_src,
module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum, checksum=checksum_src,
sha256sum=sha256sum, changed=changed, msg=info.get('msg', ''))
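As in copy and assemble, sha1 is now the working checksum while md5 is recomputed only for the legacy md5sum return key. The download path boils down to: hash the temp file, hash dest if present, copy only on mismatch. A compact sketch of that core (helper names illustrative):

import hashlib, os, shutil

def sha1sum(path, blocksize=65536):
    digest = hashlib.sha1()
    f = open(path, 'rb')
    try:
        block = f.read(blocksize)
        while block:
            digest.update(block)
            block = f.read(blocksize)
    finally:
        f.close()
    return digest.hexdigest()

def replace_if_changed(tmpsrc, dest):
    """Copy tmpsrc over dest only when contents differ; report change."""
    if not os.path.exists(dest) or sha1sum(tmpsrc) != sha1sum(dest):
        shutil.copyfile(tmpsrc, dest)
        return True
    return False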
# import module snippets

View file

@ -194,8 +194,8 @@ def write_file(module, url, dest, content):
module.fail_json(msg="failed to create temporary content file: %s" % str(err))
f.close()
md5sum_src = None
md5sum_dest = None
checksum_src = None
checksum_dest = None
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
@ -204,7 +204,7 @@ def write_file(module, url, dest, content):
if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc)
module.fail_json( msg="Source %s not readable" % (tmpsrc))
md5sum_src = module.md5(tmpsrc)
checksum_src = module.sha1(tmpsrc)
# check if there is no dest file
if os.path.exists(dest):
@ -215,19 +215,19 @@ def write_file(module, url, dest, content):
if not os.access(dest, os.R_OK):
os.remove(tmpsrc)
module.fail_json( msg="Destination %s not readable" % (dest))
md5sum_dest = module.md5(dest)
checksum_dest = module.sha1(dest)
else:
if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc)
module.fail_json( msg="Destination dir %s not writable" % (os.path.dirname(dest)))
if md5sum_src != md5sum_dest:
if checksum_src != checksum_dest:
try:
shutil.copyfile(tmpsrc, dest)
except Exception, err:
os.remove(tmpsrc)
module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err)))
os.remove(tmpsrc)
@ -426,7 +426,8 @@ def main():
uresp[ukey] = value
if 'content_type' in uresp:
if uresp['content_type'].startswith('application/json'):
if uresp['content_type'].startswith('application/json') or \
uresp['content_type'].startswith('text/json'):
try:
js = json.loads(content)
uresp['json'] = js
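Accepting text/json alongside application/json covers servers that still emit the pre-RFC content type. The check-and-parse step is small enough to show whole (hypothetical helper name):

import json

def maybe_parse_json(content_type, content):
    """Return parsed JSON for (application|text)/json bodies, else None."""
    if content_type.startswith('application/json') or \
       content_type.startswith('text/json'):
        try:
            return json.loads(content)
        except ValueError:
            return None
    return None

print(maybe_parse_json('text/json; charset=utf-8', '{"ok": true}'))  # {'ok': True}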

View file

Some files were not shown because too many files have changed in this diff.